source
stringlengths
3
92
c
stringlengths
26
2.25M
barrier.c
// RUN: %compile-run-and-check #include <omp.h> #include <stdio.h> int main(int argc, char *argv[]) { int data, out, flag = 0; #pragma omp target teams num_teams(2) map(tofrom \ : out) map(to \ : data, flag) \ thread_limit(1) #pragma omp parallel num_threads(1) { if (omp_get_team_num() == 0) { /* Write to the data buffer that will be read by thread in team 1 */ data = 42; /* Flush data to thread in team 1 */ #pragma omp barrier /* Set flag to release thread in team 1 */ #pragma omp atomic write flag = 1; } else if (omp_get_team_num() == 1) { /* Loop until we see the update to the flag */ int val; do { #pragma omp atomic read val = flag; } while (val < 1); out = data; #pragma omp barrier } } // CHECK: out=42. /* Value of out will be 42 */ printf("out=%d.\n", out); return !(out == 42); }
mixed_tentusscher_myo_epi_2004_S3_15.c
// Scenario 3 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt + Rc)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S3_15.h"

/* Report model metadata to the framework: resting potential and the number
 * of state variables (NEQ) of this cell model. */
GET_CELL_MODEL_DATA(init_cell_model_data) {
    if(get_initial_v) cell_model->initial_v = INITIAL_V;
    if(get_neq) cell_model->number_of_ode_equations = NEQ;
}

/* Load the per-cell initial state vector.  `extra_data` must carry a
 * per-cell mask: mapping[sv_id]==0 selects the myocardium steady state,
 * anything else the epicardium steady state. */
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    static bool first_call = true;
    if(first_call) {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }
    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data) {
        mapping = (uint32_t*)extra_data;
    }
    else {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }
    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0) {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;  // V;  millivolt
        sv[1] = 0.f;        // M
        sv[2] = 0.75;       // H
        sv[3] = 0.75f;      // J
        sv[4] = 0.f;        // Xr1
        sv[5] = 1.f;        // Xr2
        sv[6] = 0.f;        // Xs
        sv[7] = 1.f;        // S
        sv[8] = 0.f;        // R
        sv[9] = 0.f;        // D
        sv[10] = 1.f;       // F
        sv[11] = 1.f;       // FCa
        sv[12] = 1.f;       // G
        sv[13] = 0.0002;    // Cai
        sv[14] = 0.2f;      // CaSR
        sv[15] = 11.6f;     // Nai
        sv[16] = 138.3f;    // Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;  // V;  millivolt
        sv[1] = 0.f;        // M
        sv[2] = 0.75;       // H
        sv[3] = 0.75f;      // J
        sv[4] = 0.f;        // Xr1
        sv[5] = 1.f;        // Xr2
        sv[6] = 0.f;        // Xs
        sv[7] = 1.f;        // S
        sv[8] = 0.f;        // R
        sv[9] = 0.f;        // D
        sv[10] = 1.f;       // F
        sv[11] = 1.f;       // FCa
        sv[12] = 1.f;       // G
        sv[13] = 0.0002;    // Cai
        sv[14] = 0.2f;      // CaSR
        sv[15] = 11.6f;     // Nai
        sv[16] = 138.3f;    // Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.4536778755927,0.00132201466755546,0.776882180760055,0.776714012062442,0.000177819690535583,0.483897765693610,0.00296439161380545,0.999998309612983,1.97077339681427e-08,1.92017794452422e-05,0.999764538691899,1.00700003918174,0.999993820112011,4.74076184702537e-05,0.553071201822523,10.6955005973442,138.863214319207};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
}

/* Advance every cell in `cells_to_solve` by `num_steps` steps of size `dt`,
 * dispatching to the myocardium or epicardium kernel per the mask. */
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data) {
        mapping = (uint32_t*)extra_data;
    }
    else {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }
    uint32_t sv_id;
    int i;
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;
        for (int j = 0; j < num_steps; ++j) {
            /* NOTE(review): the mask is indexed with the loop index `i` here,
             * but with `sv_id` in set_model_initial_conditions_cpu.  The two
             * differ whenever cells_to_solve is non-NULL — confirm which
             * index is intended (sv_id looks more consistent). */
            if (mapping[i] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

/* One time step for a myocardium cell: RHS_cpu_myo writes the updated state
 * into rDY (not derivatives), which is then copied back into sv. */
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current) {
    real rY[NEQ], rDY[NEQ];
    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];
    RHS_cpu_myo(rY, rDY, stim_current, dt);
    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

/* TenTusscher 2004 myocardium kernel.  Despite the name, rDY_ receives the
 * UPDATED state after one step of size dt: gating variables use the
 * exponential (Rush-Larsen style) update toward their steady state, while
 * the membrane potential and ionic concentrations use forward Euler. */
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt) {
    // State variables
    real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16];
    //External concentrations
    real Ko=5.4; real Cao=2.0; real Nao=140.0;
    //Intracellular volumes
    real Vc=0.016404; real Vsr=0.001094;
    //Calcium dynamics
    real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f;
    //Constants
    const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F;
    //Cellular capacitance
    real CAPACITANCE=0.185;
    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0; real KmNa=40.0; real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825; real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;
    real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF;
    real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug);
    real sItot;
    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98));
    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca);
    //Determine total current
    (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current;
    //update concentrations
    Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi;
    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); }
    J_INF=H_INF;
    Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs;
    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46;
    if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16));
    //Update gates
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    /* FCa and G gates are clamped so they may not reopen while depolarized */
    fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold;
    //update voltage
    rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki;
}

/* One time step for an epicardium cell; mirrors solve_model_ode_cpu_myo. */
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current) {
    real rY[NEQ], rDY[NEQ];
    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];
    RHS_cpu_epi(rY, rDY, stim_current, dt);
    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

/* TenTusscher 2004 epicardium kernel.  Same structure as RHS_cpu_myo, but
 * the conductances are overridden from the hard-coded `parameters` array
 * (fitted values), and the SR release/leak use arel/crel/Vleak from it. */
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt) {
    // State variables
    real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16];
    //External concentrations
    real Ko=5.4; real Cao=2.0; real Nao=140.0;
    //Intracellular volumes
    real Vc=0.016404; real Vsr=0.001094;
    //Calcium dynamics
    real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f;
    //Constants
    const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F;
    //Cellular capacitance
    real CAPACITANCE=0.185;
    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Epicardium cell
    real Gks=0.245;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Epicardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0; real KmNa=40.0; real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825; real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;
    /* Fitted parameter set overriding the defaults declared above */
    real parameters []={14.5951660451258,0.000103567400537217,0.000134424511464274,0.000261002826954205,0.245313667680283,0.159725689747974,0.167456757509889,4.44865455807541,0.0152791374864867,1.17548122921737,1088.31101235283,0.000516923697255006,0.0952860789411678,0.0200000000000000,0.00400813836385454,4.59261947943359e-05};
    GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15];
    real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF;
    real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug);
    real sItot;
    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98));
    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca);
    //Determine total current
    (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current;
    //update concentrations
    Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi;
    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); }
    J_INF=H_INF;
    Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs;
    R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46;
    if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16));
    //Update gates
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    /* FCa and G gates are clamped so they may not reopen while depolarized */
    fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold;
    //update voltage
    rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki;
}
data.h
/*!
 * Copyright (c) 2015 by Contributors
 * \file data.h
 * \brief The input data structure of xgboost.
 * \author Tianqi Chen
 */
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_

#include <dmlc/base.h>
#include <dmlc/data.h>
#include <rabit/rabit.h>
#include <xgboost/base.h>
#include <xgboost/span.h>
#include <xgboost/host_device_vector.h>

#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>

namespace xgboost {
// forward declare dmatrix.
class DMatrix;

/*! \brief data type accepted by xgboost interface */
enum class DataType : uint8_t { kFloat32 = 1, kDouble = 2, kUInt32 = 3, kUInt64 = 4 };

/*! \brief Meta information about dataset, always sit in memory. */
class MetaInfo {
 public:
  /*! \brief number of data fields in MetaInfo */
  static constexpr uint64_t kNumField = 9;
  /*! \brief number of rows in the data */
  uint64_t num_row_{0};
  /*! \brief number of columns in the data */
  uint64_t num_col_{0};
  /*! \brief number of nonzero entries in the data */
  uint64_t num_nonzero_{0};
  /*! \brief label of each instance */
  HostDeviceVector<bst_float> labels_;
  /*! \brief the index of begin and end of a group; needed when the learning task is ranking. */
  std::vector<bst_group_t> group_ptr_;
  /*! \brief weights of each instance, optional */
  HostDeviceVector<bst_float> weights_;
  /*! \brief initialized margins; if specified, xgboost will start from this init margin; can be used to specify initial prediction to boost from. */
  HostDeviceVector<bst_float> base_margin_;
  /*! \brief lower bound of the label, to be used for survival analysis (censored regression) */
  HostDeviceVector<bst_float> labels_lower_bound_;
  /*! \brief upper bound of the label, to be used for survival analysis (censored regression) */
  HostDeviceVector<bst_float> labels_upper_bound_;
  /*! \brief default constructor */
  MetaInfo() = default;
  /* Deep-copies labels/weights/base margin through the HostDeviceVector API.
   * NOTE(review): labels_lower_bound_ and labels_upper_bound_ are NOT copied
   * here — confirm whether that omission is intentional. */
  MetaInfo& operator=(MetaInfo const& that) {
    this->num_row_ = that.num_row_;
    this->num_col_ = that.num_col_;
    this->num_nonzero_ = that.num_nonzero_;
    this->labels_.Resize(that.labels_.Size());
    this->labels_.Copy(that.labels_);
    this->group_ptr_ = that.group_ptr_;
    this->weights_.Resize(that.weights_.Size());
    this->weights_.Copy(that.weights_);
    this->base_margin_.Resize(that.base_margin_.Size());
    this->base_margin_.Copy(that.base_margin_);
    return *this;
  }
  /*!
   * \brief Get weight of each instances.
   * \param i Instance index.
   * \return The weight.  Defaults to 1.0f when no weights were provided.
   */
  inline bst_float GetWeight(size_t i) const { return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f; }
  /*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss); result is memoized in label_order_cache_ */
  inline const std::vector<size_t>& LabelAbsSort() const {
    if (label_order_cache_.size() == labels_.Size()) { return label_order_cache_; }
    label_order_cache_.resize(labels_.Size());
    std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
    const auto& l = labels_.HostVector();
    XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
                          [&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
    return label_order_cache_;
  }
  /*! \brief clear all the information */
  void Clear();
  /*!
   * \brief Load the Meta info from binary stream.
   * \param fi The input stream
   */
  void LoadBinary(dmlc::Stream* fi);
  /*!
   * \brief Save the Meta info to binary stream
   * \param fo The output stream.
   */
  void SaveBinary(dmlc::Stream* fo) const;
  /*!
   * \brief Set information in the meta info.
   * \param key The key of the information.
   * \param dptr The data pointer of the source array.
   * \param dtype The type of the source data.
   * \param num Number of elements in the source array.
   */
  void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
  /*!
   * \brief Set information in the meta info with array interface.
   * \param interface_str String representation of json format array interface.
   *
   *   [ column_0, column_1, ... column_n ]
   *
   * Right now only 1 column is permitted.
   */
  void SetInfo(const char* key, std::string const& interface_str);

 private:
  /*! \brief argsort of labels */
  mutable std::vector<size_t> label_order_cache_;
};

/*! \brief Element from a sparse vector */
struct Entry {
  /*! \brief feature index */
  bst_feature_t index;
  /*! \brief feature value */
  bst_float fvalue;
  /*! \brief default constructor */
  Entry() = default;
  /*!
   * \brief constructor with index and value
   * \param index The feature or row index.
   * \param fvalue The feature value.
   */
  XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {}
  /*! \brief reversely compare feature values */
  inline static bool CmpValue(const Entry& a, const Entry& b) { return a.fvalue < b.fvalue; }
  inline bool operator==(const Entry& other) const { return (this->index == other.index && this->fvalue == other.fvalue); }
};

/*!
 * \brief Parameters for constructing batches.
 * NOTE(review): gpu_id and gpu_page_size have no default member initializer,
 * so a default-constructed BatchParam leaves them indeterminate while
 * operator!= reads them — confirm callers always use the 3-arg constructor.
 */
struct BatchParam {
  /*! \brief The GPU device to use. */
  int gpu_id;
  /*! \brief Maximum number of bins per feature for histograms. */
  int max_bin{0};
  /*! \brief Page size for external memory mode. */
  size_t gpu_page_size;
  BatchParam() = default;
  BatchParam(int32_t device, int32_t max_bin, size_t gpu_page_size = 0) : gpu_id{device}, max_bin{max_bin}, gpu_page_size{gpu_page_size} {}
  inline bool operator!=(const BatchParam& other) const { return gpu_id != other.gpu_id || max_bin != other.max_bin || gpu_page_size != other.gpu_page_size; }
};

/*!
 * \brief In-memory storage unit of sparse batch, stored in CSR format.
 */
class SparsePage {
 public:
  // Offset for each row.
  HostDeviceVector<bst_row_t> offset;
  /*! \brief the data of the segments */
  HostDeviceVector<Entry> data;
  size_t base_rowid{};
  /*! \brief an instance of sparse vector in the batch */
  using Inst = common::Span<Entry const>;
  /*! \brief get i-th row from the batch */
  inline Inst operator[](size_t i) const {
    const auto& data_vec = data.HostVector();
    const auto& offset_vec = offset.HostVector();
    size_t size;
    // in distributed mode, some partitions may not get any instance for a feature. Therefore
    // we should set the size as zero
    if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
      size = 0;
    } else {
      size = offset_vec[i + 1] - offset_vec[i];
    }
    return {data_vec.data() + offset_vec[i], static_cast<Inst::index_type>(size)};
  }
  /*! \brief constructor */
  SparsePage() { this->Clear(); }
  /*! \return Number of instances in the page. */
  inline size_t Size() const { return offset.Size() == 0 ? 0 : offset.Size() - 1; }
  /*! \return estimation of memory cost of this page */
  inline size_t MemCostBytes() const { return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry); }
  /*! \brief clear the page */
  inline void Clear() {
    base_rowid = 0;
    auto& offset_vec = offset.HostVector();
    offset_vec.clear();
    offset_vec.push_back(0);
    data.HostVector().clear();
  }
  /*! \brief Set the base row id for this page. */
  inline void SetBaseRowId(size_t row_id) { base_rowid = row_id; }
  SparsePage GetTranspose(int num_columns) const;
  /*! \brief Sort the entries of each segment in-place by feature value. */
  void SortRows() {
    auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for default(none) shared(ncol) schedule(dynamic, 1)
    for (bst_omp_uint i = 0; i < ncol; ++i) {
      if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
        std::sort(this->data.HostVector().begin() + this->offset.HostVector()[i],
                  this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
                  Entry::CmpValue);
      }
    }
  }
  /*!
   * \brief Push row block into the page.
   * \param batch the row batch.
   */
  void Push(const dmlc::RowBlock<uint32_t>& batch);
  /**
   * \brief Pushes external data batch onto this page
   *
   * \tparam AdapterBatchT
   * \param batch
   * \param missing
   * \param nthread
   *
   * \return The maximum number of columns encountered in this input batch.
   *         Useful when pushing many adapter batches to work out the total
   *         number of columns.
   */
  template <typename AdapterBatchT>
  uint64_t Push(const AdapterBatchT& batch, float missing, int nthread);
  /*!
   * \brief Push a sparse page
   * \param batch the row page
   */
  void Push(const SparsePage &batch);
  /*!
   * \brief Push a SparsePage stored in CSC format
   * \param batch The row batch to be pushed
   */
  void PushCSC(const SparsePage& batch);
};

class CSCPage: public SparsePage {
 public:
  CSCPage() : SparsePage() {}
  explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};

class SortedCSCPage : public SparsePage {
 public:
  SortedCSCPage() : SparsePage() {}
  explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};

class EllpackPageImpl;
/*!
 * \brief A page stored in ELLPACK format.
 *
 * This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid
 * including CUDA-specific implementation details in the header.
 */
class EllpackPage {
 public:
  /*!
   * \brief Default constructor.
   *
   * This is used in the external memory case. An empty ELLPACK page is constructed with its content
   * set later by the reader.
   */
  EllpackPage();
  /*!
   * \brief Constructor from an existing DMatrix.
   *
   * This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix
   * in CSR format.
   */
  explicit EllpackPage(DMatrix* dmat, const BatchParam& param);
  /*! \brief Destructor. */
  ~EllpackPage();
  /*! \return Number of instances in the page. */
  size_t Size() const;
  /*! \brief Set the base row id for this page. */
  void SetBaseRowId(size_t row_id);
  const EllpackPageImpl* Impl() const { return impl_.get(); }
  EllpackPageImpl* Impl() { return impl_.get(); }

 private:
  std::unique_ptr<EllpackPageImpl> impl_;
};

template<typename T>
class BatchIteratorImpl {
 public:
  virtual ~BatchIteratorImpl() = default;
  virtual T& operator*() = 0;
  virtual const T& operator*() const = 0;
  virtual void operator++() = 0;
  virtual bool AtEnd() const = 0;
};

template<typename T>
class BatchIterator {
 public:
  using iterator_category = std::forward_iterator_tag;
  explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }
  void operator++() {
    CHECK(impl_ != nullptr);
    ++(*impl_);
  }
  T& operator*() {
    CHECK(impl_ != nullptr);
    return *(*impl_);
  }
  const T& operator*() const {
    CHECK(impl_ != nullptr);
    return *(*impl_);
  }
  /* NOTE(review): `rhs` is ignored — inequality only reports !AtEnd(), so this
   * iterator only supports comparison against the end sentinel in range-for
   * loops, not general iterator comparison. */
  bool operator!=(const BatchIterator& rhs) const {
    CHECK(impl_ != nullptr);
    return !impl_->AtEnd();
  }
  bool AtEnd() const {
    CHECK(impl_ != nullptr);
    return impl_->AtEnd();
  }

 private:
  std::shared_ptr<BatchIteratorImpl<T>> impl_;
};

template<typename T>
class BatchSet {
 public:
  explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(begin_iter) {}
  BatchIterator<T> begin() { return begin_iter_; }
  BatchIterator<T> end() { return BatchIterator<T>(nullptr); }

 private:
  BatchIterator<T> begin_iter_;
};

/*!
 * \brief This is data structure that user can pass to DMatrix::Create
 *  to create a DMatrix for training, user can create this data structure
 *  for customized Data Loading on single machine.
 *
 *  On distributed setting, usually an customized dmlc::Parser is needed instead.
 */
template<typename T>
class DataSource : public dmlc::DataIter<T> {
 public:
  /*!
   * \brief Meta information about the dataset
   * The subclass need to be able to load this correctly from data.
   */
  MetaInfo info;
};

/*!
 * \brief Internal data structured used by XGBoost during training.
 *  There are two ways to create a customized DMatrix that reads in user defined-format.
 *
 *  - Provide a dmlc::Parser and pass into the DMatrix::Create
 *  - Alternatively, if data can be represented by an URL, define a new dmlc::Parser and register by
 *    DMLC_REGISTER_DATA_PARSER;
 *      - This works best for user defined data input source, such as data-base, filesystem.
 *  - Provide a DataSource, that can be passed to DMatrix::Create
 *      This can be used to re-use inmemory data structure into DMatrix.
 */
class DMatrix {
 public:
  /*! \brief default constructor */
  DMatrix() = default;
  /*! \brief meta information of the dataset */
  virtual MetaInfo& Info() = 0;
  /*! \brief meta information of the dataset */
  virtual const MetaInfo& Info() const = 0;
  /**
   * \brief Gets batches. Use range based for loop over BatchSet to access individual batches.
   */
  template<typename T>
  BatchSet<T> GetBatches(const BatchParam& param = {});
  template <typename T>
  bool PageExists() const;
  // the following are column meta data, should be able to answer them fast.
  /*! \return Whether the data columns single column block. */
  virtual bool SingleColBlock() const = 0;
  /*! \brief virtual destructor */
  virtual ~DMatrix() = default;
  /*! \brief Whether the matrix is dense. */
  bool IsDense() const { return Info().num_nonzero_ == Info().num_row_ * Info().num_col_; }
  /*!
   * \brief Load DMatrix from URI.
   * \param uri The URI of input.
   * \param silent Whether print information during loading.
   * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
   * \param file_format The format type of the file, used for dmlc::Parser::Create.
   *   By default "auto" will be able to load in both local binary file.
   * \param page_size Page size for external memory.
   * \return The created DMatrix.
   */
  static DMatrix* Load(const std::string& uri, bool silent, bool load_row_split, const std::string& file_format = "auto", size_t page_size = kPageSize);
  /**
   * \brief Creates a new DMatrix from an external data adapter.
   *
   * \tparam AdapterT Type of the adapter.
   * \param [in,out] adapter View onto an external data.
   * \param missing Values to count as missing.
   * \param nthread Number of threads for construction.
   * \param cache_prefix (Optional) The cache prefix for external memory.
   * \param page_size (Optional) Size of the page.
   *
   * \return a Created DMatrix.
   */
  template <typename AdapterT>
  static DMatrix* Create(AdapterT* adapter, float missing, int nthread, const std::string& cache_prefix = "", size_t page_size = kPageSize);
  /*! \brief page size 32 MB */
  static const size_t kPageSize = 32UL << 20UL;

 protected:
  virtual BatchSet<SparsePage> GetRowBatches() = 0;
  virtual BatchSet<CSCPage> GetColumnBatches() = 0;
  virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
  virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0;
  virtual bool EllpackExists() const = 0;
  virtual bool SparsePageExists() const = 0;
};

template<>
inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) {
  return GetRowBatches();
}

template<>
inline bool DMatrix::PageExists<EllpackPage>() const {
  return this->EllpackExists();
}

template<>
inline bool DMatrix::PageExists<SparsePage>() const {
  return this->SparsePageExists();
}

template<>
inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) {
  return GetColumnBatches();
}

template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) {
  return GetSortedColumnBatches();
}

template<>
inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) {
  return GetEllpackBatches(param);
}
}  // namespace xgboost

namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
}
#endif  // XGBOOST_DATA_H_
JacobiSMatTranspose.c
#include "mex.h"
#include <stdlib.h>
#include <omp.h>

/*
 * Weighted (damped) Jacobi smoother with the matrix supplied in TRANSPOSED
 * form, so each row dot-product of A reads one contiguous CSC column of A^T.
 *
 * Build with: mex -largeArrayDims JacobiSMatTranspose.c
 *
 * Inputs:
 *   prhs[0]  sparse matrix A^T (column i of prhs[0] is row i of A)
 *   prhs[1]  initial guess x_in (n x 1)
 *   prhs[2]  right-hand side b (n x 1)
 *   prhs[3]  inverse diagonal of A (n x 1)
 *   prhs[4]  number of sweeps nu
 *   prhs[5]  (optional) damping factor omega, default 0.8
 * Output:
 *   plhs[0]  smoothed iterate x_out (n x 1)
 *
 * Iteration: x <- x + omega * invDiag .* (b - A*x), repeated nu times.
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    mwIndex *C_t = mxGetIr(prhs[0]);       /* row indices of A^T (CSC) */
    mwIndex *starts_t = mxGetJc(prhs[0]);  /* column pointers of A^T */
    double *vals_t = mxGetPr(prhs[0]);
    mwIndex n = mxGetN(prhs[0]);
    double *x_in = mxGetPr(prhs[1]);
    double *b = mxGetPr(prhs[2]);
    double *invDiag = mxGetPr(prhs[3]);
    int nu = (int)(*mxGetPr(prhs[4]));
    /* Optional 6th argument overrides the default damping factor
     * (backward compatible: 5-argument calls behave exactly as before). */
    double omega = (nrhs > 5) ? *mxGetPr(prhs[5]) : 0.8;

    double *x_out = 0;
    double *aux = 0;
    mwIndex i;

    (void)nlhs;

    plhs[0] = mxCreateDoubleMatrix(n, 1, mxREAL);
    x_out = mxGetPr(plhs[0]);

    if (nu > 1) {
        aux = (double *)malloc(n * sizeof(double));
        if (aux == NULL) {
            mexErrMsgTxt("JacobiSMatTranspose: out of memory.");
        }
        /* Seed x_out with the initial guess: the first sweep snapshots
         * x_out into aux, and without this copy it would pick up the
         * zero-initialised output and silently discard x_in. */
        for (i = 0; i < n; ++i) {
            x_out[i] = x_in[i];
        }
    } else {
        /* Single sweep: read straight from the caller's x_in (never written). */
        aux = x_in;
    }

#pragma omp parallel shared(vals_t, C_t, starts_t, invDiag, b, omega, aux, x_out, n, nu) num_threads(2)
    {
        int id = omp_get_thread_num();
        int Nthrds = omp_get_num_threads();
        /* Static row partition: thread `id` owns rows [istart, iend). */
        mwIndex istart = (mwIndex)id * n / (mwIndex)Nthrds;
        mwIndex iend = (mwIndex)(id + 1) * n / (mwIndex)Nthrds;
        int k;
        if (id == Nthrds - 1) {
            iend = n;
        }

        for (k = 0; k < nu; k++) {
            /* Make all threads' writes to x_out visible before snapshotting. */
#pragma omp barrier
            if (nu > 1) {
                mwIndex r;
                for (r = istart; r < iend; ++r) {
                    aux[r] = x_out[r];
                }
            }
#pragma omp barrier
            {
                mwIndex r;
                for (r = istart; r < iend; ++r) {
                    /* temp = (A*x)[r], accumulated over column r of A^T. */
                    double temp = 0.0;
                    mwIndex jj;
                    for (jj = starts_t[r]; jj < starts_t[r + 1]; ++jj) {
                        temp += vals_t[jj] * aux[C_t[jj]];
                    }
                    x_out[r] = aux[r] + omega * (b[r] - temp) * invDiag[r];
                }
            }
        }
    }

    if (nu > 1) {
        free(aux);
    }
}
tutorial_region_prof.c
/*
 * Copyright (c) 2015, 2016, 2017, 2018, 2019, 2020, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ #include <stdlib.h> #include <stdio.h> #include <time.h> #include <math.h> #include <stdint.h> #include <mpi.h> #ifdef _OPENMP #include <omp.h> #endif #include <geopm.h> #include "tutorial_region.h" #ifdef _OPENMP static int stream_profiled_omp(uint64_t region_id, size_t num_stream, double scalar, double *a, double *b, double *c) { const size_t block = 256; const size_t num_block = num_stream / block; const size_t num_remain = num_stream % block; int err = 0; int num_thread = 1; #pragma omp parallel { num_thread = omp_get_num_threads(); } #pragma omp parallel { int thread_idx = omp_get_thread_num(); (void)geopm_tprof_init_loop(num_thread, thread_idx, num_block, 0); #pragma omp for for (size_t i = 0; i < num_block; ++i) { for (size_t j = 0; j < block; ++j) { a[i * block + j] = b[i * block + j] + scalar * c[i * block + j]; } (void)geopm_tprof_post(); } #pragma omp for for (size_t j = 0; j < num_remain; ++j) { a[num_block * block + j] = b[num_block * block + j] + scalar * c[num_block * block + j]; } } return err; } #endif static int stream_profiled_serial(uint64_t region_id, size_t num_stream, double scalar, double *a, double *b, double *c) { const size_t block = 256; const size_t num_block = num_stream / block; const size_t num_remain = num_stream % block; const double norm = 1.0 / num_block; for (size_t i = 0; i < num_block; ++i) { for (size_t j = 0; j < block; ++j) { a[i * block + j] = b[i * block + j] + scalar * c[i * block + j]; } geopm_prof_progress(region_id, i * norm); } for (size_t j = 0; j < num_remain; ++j) { a[num_block * block + j] = b[num_block * block + j] + scalar * c[num_block * block + j]; } return 0; } int tutorial_stream_profiled(double big_o, int do_report) { int err = 0; if (big_o != 0.0) { size_t cline_size = 64; size_t num_stream = (size_t)big_o * 500000000; size_t mem_size = sizeof(double) * num_stream; double *a = NULL; double *b = NULL; double *c = NULL; double scalar = 3.0; uint64_t stream_rid; if (!err) { err = 
geopm_prof_region("tutorial_stream", GEOPM_REGION_HINT_MEMORY, &stream_rid); } err = posix_memalign((void *)&a, cline_size, mem_size); if (!err) { err = posix_memalign((void *)&b, cline_size, mem_size); } if (!err) { err = posix_memalign((void *)&c, cline_size, mem_size); } if (!err) { #pragma omp parallel for for (int i = 0; i < num_stream; i++) { a[i] = 0.0; b[i] = 1.0; c[i] = 2.0; } if (do_report) { printf("Executing profiled STREAM triad on length %d vectors.\n", num_stream); fflush(stdout); } err = geopm_prof_enter(stream_rid); } if (!err) { #ifdef _OPENMP err = stream_profiled_omp(stream_rid, num_stream, scalar, a, b, c); #else err = stream_profiled_serial(stream_rid, num_stream, scalar, a, b, c); #endif } if (!err) { err = geopm_prof_exit(stream_rid); } if (!err) { free(c); free(b); free(a); } } }
convolution_3x3_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available. // // Copyright (C) 2019 BUG1989. All rights reserved. // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void conv3x3s1_winograd23_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat> &kernel_tm2, int inch, int outch) { Mat kernel_tm(4*4, inch, outch, 2ul); // G const short ktm[4][3] = { { 2, 0, 0}, { 1, 1, 1}, { 1, -1, 1}, { 0, 0, 2} }; #pragma omp parallel for for (int p = 0; p<outch; p++) { for (int q = 0; q<inch; q++) { const signed char* kernel0 = (const signed char*)kernel + p*inch * 9 + q * 9; short* kernel_tm0 = kernel_tm.channel(p).row<short>(q); // transform kernel const signed char* k0 = kernel0; const signed char* k1 = kernel0 + 3; const signed char* k2 = kernel0 + 6; // h short tmp[4][3]; for (int i=0; i<4; i++) { tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j=0; j<4; j++) { short* tmpp = &tmp[j][0]; for (int i=0; i<4; i++) { kernel_tm0[j*4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } for (int r=0; r<4; r++) { Mat kernel_tm_test(4*8, inch, outch/8 + (outch%8)/4 + outch%4, 2u); int p = 0; for (; 
p+7<outch; p+=8) { const short* kernel0 = (const short*)kernel_tm + (p+0)*inch*16; const short* kernel1 = (const short*)kernel_tm + (p+1)*inch*16; const short* kernel2 = (const short*)kernel_tm + (p+2)*inch*16; const short* kernel3 = (const short*)kernel_tm + (p+3)*inch*16; const short* kernel4 = (const short*)kernel_tm + (p+4)*inch*16; const short* kernel5 = (const short*)kernel_tm + (p+5)*inch*16; const short* kernel6 = (const short*)kernel_tm + (p+6)*inch*16; const short* kernel7 = (const short*)kernel_tm + (p+7)*inch*16; short* ktmp = kernel_tm_test.channel(p/8); for (int q=0; q<inch; q++) { ktmp[0] = kernel0[r*4+0]; ktmp[1] = kernel0[r*4+1]; ktmp[2] = kernel0[r*4+2]; ktmp[3] = kernel0[r*4+3]; ktmp[4] = kernel1[r*4+0]; ktmp[5] = kernel1[r*4+1]; ktmp[6] = kernel1[r*4+2]; ktmp[7] = kernel1[r*4+3]; ktmp[8] = kernel2[r*4+0]; ktmp[9] = kernel2[r*4+1]; ktmp[10] = kernel2[r*4+2]; ktmp[11] = kernel2[r*4+3]; ktmp[12] = kernel3[r*4+0]; ktmp[13] = kernel3[r*4+1]; ktmp[14] = kernel3[r*4+2]; ktmp[15] = kernel3[r*4+3]; ktmp[16] = kernel4[r*4+0]; ktmp[17] = kernel4[r*4+1]; ktmp[18] = kernel4[r*4+2]; ktmp[19] = kernel4[r*4+3]; ktmp[20] = kernel5[r*4+0]; ktmp[21] = kernel5[r*4+1]; ktmp[22] = kernel5[r*4+2]; ktmp[23] = kernel5[r*4+3]; ktmp[24] = kernel6[r*4+0]; ktmp[25] = kernel6[r*4+1]; ktmp[26] = kernel6[r*4+2]; ktmp[27] = kernel6[r*4+3]; ktmp[28] = kernel7[r*4+0]; ktmp[29] = kernel7[r*4+1]; ktmp[30] = kernel7[r*4+2]; ktmp[31] = kernel7[r*4+3]; ktmp += 32; kernel0 += 16; kernel1 += 16; kernel2 += 16; kernel3 += 16; kernel4 += 16; kernel5 += 16; kernel6 += 16; kernel7 += 16; } } for (; p+3<outch; p+=4) { const short* kernel0 = (const short*)kernel_tm + (p+0)*inch*16; const short* kernel1 = (const short*)kernel_tm + (p+1)*inch*16; const short* kernel2 = (const short*)kernel_tm + (p+2)*inch*16; const short* kernel3 = (const short*)kernel_tm + (p+3)*inch*16; short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4); for (int q=0; q<inch; q++) { ktmp[0] = kernel0[r*4+0]; ktmp[1] = 
kernel0[r*4+1]; ktmp[2] = kernel0[r*4+2]; ktmp[3] = kernel0[r*4+3]; ktmp[4] = kernel1[r*4+0]; ktmp[5] = kernel1[r*4+1]; ktmp[6] = kernel1[r*4+2]; ktmp[7] = kernel1[r*4+3]; ktmp[8] = kernel2[r*4+0]; ktmp[9] = kernel2[r*4+1]; ktmp[10] = kernel2[r*4+2]; ktmp[11] = kernel2[r*4+3]; ktmp[12] = kernel3[r*4+0]; ktmp[13] = kernel3[r*4+1]; ktmp[14] = kernel3[r*4+2]; ktmp[15] = kernel3[r*4+3]; ktmp += 16; kernel0 += 16; kernel1 += 16; kernel2 += 16; kernel3 += 16; } } for (; p<outch; p++) { const short* kernel0 = (const short*)kernel_tm + p*inch*16; short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4 + p%4); for (int q=0; q<inch; q++) { ktmp[0] = kernel0[r*4+0]; ktmp[1] = kernel0[r*4+1]; ktmp[2] = kernel0[r*4+2]; ktmp[3] = kernel0[r*4+3]; ktmp += 4; kernel0 += 16; } } kernel_tm2.push_back(kernel_tm_test); } } static void conv3x3s1_winograd23_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 2n+2, winograd F(2,3) Mat bottom_blob_bordered = bottom_blob; outw = (outw + 1) / 2 * 2; outh = (outh + 1) / 2 * 2; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm/4; // may be the block num in FeatherCNN int nRowBlocks = w_tm/4; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4, inch, tiles*4, 2u, opt.workspace_allocator); // BT // const float itm[4][4] = { // {1.0f, 0.0f, -1.0f, 0.0f}, // {0.0f, 1.0f, 1.00f, 0.0f}, // {0.0f, -1.0f, 1.00f, 0.0f}, // {0.0f, -1.0f, 0.00f, 1.0f} // }; #pragma omp parallel for num_threads(opt.num_threads) for (int q=0; q<inch; q++) { const signed char* img = bottom_blob_bordered.channel(q); for 
(int j=0; j<nColBlocks; j++) { const signed char* r0 = img + w * j * 2; const signed char* r1 = r0 + w; const signed char* r2 = r1 + w; const signed char* r3 = r2 + w; for (int i = 0; i<nRowBlocks; i++) { short* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row<short>(q); short* out_tm1 = bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row<short>(q); short* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row<short>(q); short* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row<short>(q); #if __ARM_NEON #if __aarch64__ asm volatile( // load "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.8b}, [%0] \n" "prfm pldl1keep, [%1, #64] \n" "ld1 {v1.8b}, [%1] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v2.8b}, [%2] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v3.8b}, [%3] \n" // w = B_t * d, trans int8 to int16 "ssubl v4.8h, v0.8b, v2.8b \n" // d4 "saddl v5.8h, v1.8b, v2.8b \n" // d6 "ssubl v6.8h, v2.8b, v1.8b \n" // d8 "ssubl v7.8h, v3.8b, v1.8b \n" // d10 // transpose w to w_t "trn1 v8.4h, v4.4h, v5.4h \n" "trn2 v9.4h, v4.4h, v5.4h \n" "trn1 v10.4h, v6.4h, v7.4h \n" "trn2 v11.4h, v6.4h, v7.4h \n" "trn1 v0.2s, v8.2s, v10.2s \n" "trn2 v2.2s, v8.2s, v10.2s \n" "trn1 v1.2s, v9.2s, v11.2s \n" "trn2 v3.2s, v9.2s, v11.2s \n" // U = B_t * d_t "sub v4.4h, v0.4h, v2.4h \n" "add v5.4h, v1.4h, v2.4h \n" "sub v6.4h, v2.4h, v1.4h \n" "sub v7.4h, v3.4h, v1.4h \n" // save "st1 {v4.4h}, [%4] \n" "st1 {v5.4h}, [%5] \n" "st1 {v6.4h}, [%6] \n" "st1 {v7.4h}, [%7] \n" : "=r"(r0), // %0 "=r"(r1), // %1 "=r"(r2), // %2 "=r"(r3), // %3 "=r"(out_tm0), // %4 "=r"(out_tm1), // %5 "=r"(out_tm2), // %6 "=r"(out_tm3) // %7 : "0"(r0), "1"(r1), "2"(r2), "3"(r3), "4"(out_tm0), "5"(out_tm1), "6"(out_tm2), "7"(out_tm3) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11" ); #else asm volatile( // load "pld [%0, #64] \n" "vld1.s8 {d0}, [%0] \n" "pld [%1, #64] \n" "vld1.s8 {d1}, [%1] \n" "pld [%2, #64] \n" "vld1.s8 {d2}, [%2] \n" "pld [%3, #64] \n" "vld1.s8 
{d3}, [%3] \n" // w = B_t * d, trans int8 to int16 "vsubl.s8 q2, d0, d2 \n" // d4 "vaddl.s8 q3, d1, d2 \n" // d6 "vsubl.s8 q4, d2, d1 \n" // d8 "vsubl.s8 q5, d3, d1 \n" // d10 // transpose w to w_t "vtrn.s16 d4, d6 \n" "vtrn.s16 d8, d10 \n" "vtrn.s32 d4, d8 \n" "vtrn.s32 d6, d10 \n" // U = B_t * d_t "vsub.s16 d11, d4, d8 \n" "vadd.s16 d12, d6, d8 \n" "vsub.s16 d13, d8, d6 \n" "vsub.s16 d14, d10, d6 \n" // save "vst1.s32 {d11}, [%4] \n" "vst1.s32 {d12}, [%5] \n" "vst1.s32 {d13}, [%6] \n" "vst1.s32 {d14}, [%7] \n" : "=r"(r0), // %0 "=r"(r1), // %1 "=r"(r2), // %2 "=r"(r3), // %3 "=r"(out_tm0), // %4 "=r"(out_tm1), // %5 "=r"(out_tm2), // %6 "=r"(out_tm3) // %7 : "0"(r0), "1"(r1), "2"(r2), "3"(r3), "4"(out_tm0), "5"(out_tm1), "6"(out_tm2), "7"(out_tm3) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7" ); #endif // __aarch64__ #else short d0[4],d1[4],d2[4],d3[4]; short w0[4],w1[4],w2[4],w3[4]; short t0[4],t1[4],t2[4],t3[4]; // load for (int n = 0; n < 4; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; } // w = B_t * d for (int n = 0; n < 4; n++) { w0[n] = d0[n] - d2[n]; w1[n] = d1[n] + d2[n]; w2[n] = d2[n] - d1[n]; w3[n] = d3[n] - d1[n]; } // transpose d to d_t { t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; } // U = B_t * d_t for (int n = 0; n < 4; n++) { d0[n] = t0[n] - t2[n]; d1[n] = t1[n] + t2[n]; d2[n] = t2[n] - t1[n]; d3[n] = t3[n] - t1[n]; } // save to out_tm for (int n = 0; n < 4; n++) { out_tm0[n] = d0[n]; out_tm1[n] = d1[n]; out_tm2[n] = d2[n]; out_tm3[n] = d3[n]; } #endif r0 += 2; r1 += 2; r2 += 2; r3 += 2; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm/4; // may be the block num in FeatherCNN int nRowBlocks = w_tm/4; const int tiles = nColBlocks * nRowBlocks; 
top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r=0; r<4; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); int* output4_tm = top_blob_tm.channel(p+4); int* output5_tm = top_blob_tm.channel(p+5); int* output6_tm = top_blob_tm.channel(p+6); int* output7_tm = top_blob_tm.channel(p+7); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; output4_tm = output4_tm + r*4; output5_tm = output5_tm + r*4; output6_tm = output6_tm + r*4; output7_tm = output7_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "mov w4, %w20 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%8] \n" "ld1 {v9.4h, v10.4h}, [%9] \n" // _k0 = vld1q_s16(kptr); "add %9, %9, #16 \n" "ld1 {v11.4h, v12.4h}, [%9] \n" // _k0n = vld1q_s16(kptr+8); "add %9, %9, #16 \n" "ld1 {v13.4h, v14.4h}, [%9] \n" // _k1 = vld1q_s16(kptr+16); "add %9, %9, #16 \n" "ld1 {v15.4h, v16.4h}, [%9] \n" // _k1n = vld1q_s16(kptr+24); "add %8, %8, #8 \n" "add %9, %9, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) 
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63) "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // "st1 {v4.4s}, [%4] \n" // "st1 {v5.4s}, [%5] \n" // "st1 {v6.4s}, [%6] \n" // "st1 {v7.4s}, [%7] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "vmov.s32 q4, #0 \n" "vmov.s32 q5, #0 \n" "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "mov r4, %20 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%8]! 
\n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%9] \n" // _k0 = vld1q_s16(kptr); "add %9, #16 \n" "vld1.s16 {d20-d21}, [%9] \n" // _k0n = vld1q_s16(kptr+8); "add %9, #16 \n" "vld1.s16 {d22-d23}, [%9] \n" // _k1 = vld1q_s16(kptr+16); "add %9, #16 \n" "vld1.s16 {d24-d25}, [%9] \n" // _k1n = vld1q_s16(kptr+24); "add %9, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43) "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53) "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63) "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" "vst1.s32 {d8-d9}, [%4] \n" "vst1.s32 {d10-d11}, [%5] \n" "vst1.s32 {d12-d13}, [%6] \n" "vst1.s32 {d14-d15}, [%7] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int sum4[4] = {0}; int sum5[4] = {0}; int sum6[4] = {0}; int sum7[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += 
(int)r0[n] * kptr[n+12]; sum4[n] += (int)r0[n] * kptr[n+16]; sum5[n] += (int)r0[n] * kptr[n+20]; sum6[n] += (int)r0[n] * kptr[n+24]; sum7[n] += (int)r0[n] * kptr[n+28]; } kptr += 32; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __ARM_NEON output0_tm += 16; output1_tm += 16; output2_tm += 16; output3_tm += 16; output4_tm += 16; output5_tm += 16; output6_tm += 16; output7_tm += 16; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "mov w4, %w12 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%4] \n" "ld1 {v9.4h, v10.4h}, [%5] \n" // _k0 = vld1q_s16(kptr); "add %5, %5, #16 \n" "ld1 {v11.4h, v12.4h}, [%5] \n" // _k0n = vld1q_s16(kptr+8); "add %4, %4, #8 \n" "add %5, %5, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // 
store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "mov r4, %12 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%5] \n" // _k0 = vld1q_s16(kptr); "add %5, #16 \n" "vld1.s16 {d20-d21}, [%5] \n" // _k0n = vld1q_s16(kptr+8); "add %5, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += (int)r0[n] * kptr[n+12]; } kptr += 16; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; 
output3_tm[n] = sum3[n]; } #endif // __ARM_NEON output0_tm += 16; output1_tm += 16; output2_tm += 16; output3_tm += 16; } } remain_outch_start += nn_outch << 2; for (int p=remain_outch_start; p<outch; p++) { int* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "mov w4, %w6 \n" "0: \n" // for (int q=0; q<inch; q++) //"prfm pldl1keep, [%2, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%1] \n" "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %1, %1, #8 \n" "add %2, %2, #8 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "mov r4, %6 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "add %1, #8 \n" "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %2, #8 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q8", "q9" ); #endif // __aarch64__ #else int sum0[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; } #endif output0_tm += 16; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform 
output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // AT // const float itm[2][4] = { // {1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 1.0f} // }; int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm/4; // may be the block num in FeatherCNN int nRowBlocks = w_tm/4; #if __ARM_NEON int32x2_t _shift = vdup_n_s32(-2); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { int* out_tile = top_blob_tm.channel(p); int* outRow0 = top_blob_bordered.channel(p); int* outRow1 = outRow0 + outw; for (int j=0; j<nColBlocks; j++) { for(int i=0; i<nRowBlocks; i++) { #if __ARM_NEON #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "add v0.4s, v0.4s, v1.4s \n" // s0 = s0 + s1 + s2; "sub v1.4s, v1.4s, v2.4s \n" "add v0.4s, v0.4s, v2.4s \n" // s1 = s1 - s2 + s3; "add v1.4s, v1.4s, v3.4s \n" "trn1 v4.4s, v0.4s, v1.4s \n" "trn2 v5.4s, v0.4s, v1.4s \n" "dup v6.2d, v4.d[1] \n" "dup v7.2d, v5.d[1] \n" "add v0.2s, v4.2s, v5.2s \n" // o0 = d0 + d1 + d2; "sub v1.2s, v5.2s, v6.2s \n" "add v0.2s, v0.2s, v6.2s \n" // o1 = d1 - d2 + d3; "add v1.2s, v1.2s, v7.2s \n" "sshl v0.2s, v0.2s, %6.2s \n" // o0 = o0 >> 2 "sshl v1.2s, v1.2s, %6.2s \n" // o1 = o1 >> 2 "st1 {v0.2s}, [%1], #8 \n" "st1 {v1.2s}, [%2], #8 \n" : "=r"(out_tile), // %0 "=r"(outRow0), // %1 "=r"(outRow1) // %2 : "0"(out_tile), "1"(outRow0), "2"(outRow1), "w"(_shift) // %6 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" ); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "vaddq.s32 q0, q0, q1 \n" // s0 = s0 + s1 + s2; "vsubq.s32 q1, q1, q2 \n" "vaddq.s32 q0, q0, q2 \n" // s1 = s1 - s2 + s3; "vaddq.s32 q1, q1, q3 \n" "vtrn.s32 q0, q1 \n" "vadd.s32 d8, d0, d2 \n" // o0 = d0 + d1 + d2; "vsub.s32 d9, d2, d1 \n" "vadd.s32 d8, d8, d1 \n" // o1 = d1 - d2 + d3; "vadd.s32 d9, d9, d3 \n" "vshl.s32 d8, d8, %P6 \n" // o0 = o0 >> 2 "vshl.s32 d9, 
d9, %P6 \n" // o1 = o1 >> 2
                        "vst1.s32 {d8}, [%1]! \n"
                        "vst1.s32 {d9}, [%2]! \n"
                        : "=r"(out_tile), // %0
                        "=r"(outRow0), // %1
                        "=r"(outRow1) // %2
                        : "0"(out_tile), "1"(outRow0), "2"(outRow1), "w"(_shift) // %6
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q4"
                    );
#endif // __aarch64__
#else
                    // Scalar fallback: output transform of one 4x4 accumulator tile
                    // into a 2x2 output patch, Y = A_T * M * A (Winograd F(2,3)).
                    int s0[4],s1[4],s2[4],s3[4];
                    int w0[4],w1[4];
                    int d0[2],d1[2],d2[2],d3[2];
                    int o0[2],o1[2];
                    // load the 4x4 accumulator tile (row major)
                    for (int n = 0; n < 4; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n+ 4];
                        s2[n] = out_tile[n+ 8];
                        s3[n] = out_tile[n+12];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n];
                        w1[n] = s1[n] - s2[n] + s3[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0]; d0[1] = w1[0];
                        d1[0] = w0[1]; d1[1] = w1[1];
                        d2[0] = w0[2]; d2[1] = w1[2];
                        d3[0] = w0[3]; d3[1] = w1[3];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 2; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n];
                        o1[n] = d1[n] - d2[n] + d3[n];
                    }
                    // store the 2x2 output patch; shift right by 2 because the integer
                    // kernel transform G was scaled by 2 (the result carries 2*2 = 4)
                    outRow0[0] = o0[0] >> 2;
                    outRow0[1] = o0[1] >> 2;
                    outRow1[0] = o1[0] >> 2;
                    outRow1[1] = o1[1] >> 2;

                    out_tile += 16;
                    outRow0 += 2;
                    outRow1 += 2;
#endif // __ARM_NEON
                }
                outRow0 += outw;
                outRow1 += outw;
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads);
}

// Transform each 3x3 int8 kernel into its 6x6 Winograd F(4,3) domain form
// U = G * g * G_T (integer-scaled G, see ktm below), then repack the 36
// coefficients into 9 Mats (one per group r of 4 coefficients), interleaved
// by blocks of 8 / 4 / 1 output channels to match the dot-product kernels.
// Output: kernel_tm2 receives 9 repacked Mats. Interface unchanged.
static void conv3x3s1_winograd43_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat> &kernel_tm2, int inch, int outch)
{
    Mat kernel_tm(6*6, inch, outch, 2ul);

    // G
    // const float ktm[6][3] = {
    //     {  1.0f/4,     0.0f,    0.0f},
    //     { -1.0f/6,  -1.0f/6, -1.0f/6},
    //     { -1.0f/6,   1.0f/6, -1.0f/6},
    //     { 1.0f/24,  1.0f/12,  1.0f/6},
    //     { 1.0f/24, -1.0f/12,  1.0f/6},
    //     {    0.0f,     0.0f,    1.0f}
    // };
    // Integer version of G, scaled by 24 so all entries are exact integers;
    // the factor 24*24 = 576 is divided out in the output transform.
    const short ktm[6][3] = {
        { 6, 0, 0},
        { -4, -4, -4},
        { -4, 4, -4},
        { 1, 2, 4},
        { 1, -2, 4},
        { 0, 0, 24}
    };

    #pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p*inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h = G * g  (6x3 intermediate)
            short tmp[6][3];
            for (int i=0; i<6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G_T  (6x6 transformed kernel)
            for (int j=0; j<6; j++)
            {
                short* tmpp = &tmp[j][0];
                for (int i=0; i<6; i++)
                {
                    kernel_tm0[j*6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // Repack: for each of the 9 groups r (36 coefficients = 9 groups of 4),
    // interleave coefficients of 8 (then 4, then 1) consecutive output
    // channels so the dot kernels can load them sequentially.
    for (int r=0; r<9; r++)
    {
        Mat kernel_tm_test(4*8, inch, outch/8 + (outch%8)/4 + outch%4, 2u);

        int p = 0;
        // blocks of 8 output channels
        for (; p+7<outch; p+=8)
        {
            const short* kernel0 = (const short*)kernel_tm.channel(p);
            const short* kernel1 = (const short*)kernel_tm.channel(p+1);
            const short* kernel2 = (const short*)kernel_tm.channel(p+2);
            const short* kernel3 = (const short*)kernel_tm.channel(p+3);
            const short* kernel4 = (const short*)kernel_tm.channel(p+4);
            const short* kernel5 = (const short*)kernel_tm.channel(p+5);
            const short* kernel6 = (const short*)kernel_tm.channel(p+6);
            const short* kernel7 = (const short*)kernel_tm.channel(p+7);

            short* ktmp = kernel_tm_test.channel(p/8);

            for (int q=0; q<inch; q++)
            {
                // 4 coefficients of group r from each of the 8 channels
                ktmp[0] = kernel0[r*4+0];
                ktmp[1] = kernel0[r*4+1];
                ktmp[2] = kernel0[r*4+2];
                ktmp[3] = kernel0[r*4+3];
                ktmp[4] = kernel1[r*4+0];
                ktmp[5] = kernel1[r*4+1];
                ktmp[6] = kernel1[r*4+2];
                ktmp[7] = kernel1[r*4+3];
                ktmp[8] = kernel2[r*4+0];
                ktmp[9] = kernel2[r*4+1];
                ktmp[10] = kernel2[r*4+2];
                ktmp[11] = kernel2[r*4+3];
                ktmp[12] = kernel3[r*4+0];
                ktmp[13] = kernel3[r*4+1];
                ktmp[14] = kernel3[r*4+2];
                ktmp[15] = kernel3[r*4+3];
                ktmp[16] = kernel4[r*4+0];
                ktmp[17] = kernel4[r*4+1];
                ktmp[18] = kernel4[r*4+2];
                ktmp[19] = kernel4[r*4+3];
                ktmp[20] = kernel5[r*4+0];
                ktmp[21] = kernel5[r*4+1];
                ktmp[22] = kernel5[r*4+2];
                ktmp[23] = kernel5[r*4+3];
                ktmp[24] = kernel6[r*4+0];
                ktmp[25] = kernel6[r*4+1];
                ktmp[26] = kernel6[r*4+2];
                ktmp[27] = kernel6[r*4+3];
                ktmp[28] = kernel7[r*4+0];
                ktmp[29] = kernel7[r*4+1];
                ktmp[30] = kernel7[r*4+2];
                ktmp[31] = kernel7[r*4+3];

                ktmp += 32;
                kernel0 += 36; // advance one input channel (6*6 coefficients)
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
                kernel4 += 36;
                kernel5 += 36;
                kernel6 += 36;
                kernel7 += 36;
            }
        }
        // blocks of 4 output channels
        for (; p+3<outch; p+=4)
        {
            const short* kernel0 = (const short*)kernel_tm.channel(p);
            const short* kernel1 = (const short*)kernel_tm.channel(p+1);
            const short* kernel2 = (const short*)kernel_tm.channel(p+2);
            const short* kernel3 = (const short*)kernel_tm.channel(p+3);

            short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4);

            for (int q=0; q<inch; q++)
            {
                ktmp[0] = kernel0[r*4+0];
                ktmp[1] = kernel0[r*4+1];
                ktmp[2] = kernel0[r*4+2];
                ktmp[3] = kernel0[r*4+3];
                ktmp[4] = kernel1[r*4+0];
                ktmp[5] = kernel1[r*4+1];
                ktmp[6] = kernel1[r*4+2];
                ktmp[7] = kernel1[r*4+3];
                ktmp[8] = kernel2[r*4+0];
                ktmp[9] = kernel2[r*4+1];
                ktmp[10] = kernel2[r*4+2];
                ktmp[11] = kernel2[r*4+3];
                ktmp[12] = kernel3[r*4+0];
                ktmp[13] = kernel3[r*4+1];
                ktmp[14] = kernel3[r*4+2];
                ktmp[15] = kernel3[r*4+3];

                ktmp += 16;
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
            }
        }
        // remaining single output channels
        for (; p<outch; p++)
        {
            const short* kernel0 = (const short*)kernel_tm.channel(p);

            short* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4 + p%4);

            for (int q=0; q<inch; q++)
            {
                ktmp[0] = kernel0[r*4+0];
                ktmp[1] = kernel0[r*4+1];
                ktmp[2] = kernel0[r*4+2];
                ktmp[3] = kernel0[r*4+3];

                ktmp += 4;
                kernel0 += 36;
            }
        }
        kernel_tm2.push_back(kernel_tm_test);
    }
}

// Winograd F(4,3) int8 3x3 stride-1 convolution:
// input transform -> per-tile dot product with the repacked transformed
// kernels -> output transform -> crop padding. Interface unchanged.
static void conv3x3s1_winograd43_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2, winograd F(4,3): output rounded up to multiples of 4,
    // input needs 2 extra border columns/rows for the 6x6 tiles
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered,
0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads);

    // BEGIN transform input
    // Each 6x6 input tile (stride 4, overlapping by 2) is transformed with
    // B_T * d * B and the 36 results are scattered into 9 groups of 4 shorts
    // per (tile, input channel), matching the kernel repack layout.
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm/6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/6;

        const int tiles = nColBlocks * nRowBlocks;

        bottom_blob_tm.create(4, inch, tiles*9, 2u, opt.workspace_allocator);

        // BT
        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 =  4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r03 + r04
        // 2 =  4 * (r01 - r02) - r03 + r04
        // 3 = -2 * r01 - r02 + 2 * r03 + r04
        // 4 =  2 * r01 - r02 - 2 * r03 + r04
        // 5 =  4 * r01 - 5 * r03 + r05

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q=0; q<inch; q++)
        {
            const signed char* img = bottom_blob_bordered.channel(q);

            for (int j = 0; j < nColBlocks; j++)
            {
                // six consecutive input rows of the current tile row
                const signed char* r0 = img + w * j * 4;
                const signed char* r1 = r0 + w;
                const signed char* r2 = r1 + w;
                const signed char* r3 = r2 + w;
                const signed char* r4 = r3 + w;
                const signed char* r5 = r4 + w;

                for (int i = 0; i < nRowBlocks; i++)
                {
                    // nine destinations: group g lives in channel tiles*g + tile index
                    short* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row<short>(q);
                    short* out_tm1 = bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row<short>(q);
                    short* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row<short>(q);
                    short* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row<short>(q);
                    short* out_tm4 = bottom_blob_tm.channel(tiles*4+j*nRowBlocks+i).row<short>(q);
                    short* out_tm5 = bottom_blob_tm.channel(tiles*5+j*nRowBlocks+i).row<short>(q);
                    short* out_tm6 = bottom_blob_tm.channel(tiles*6+j*nRowBlocks+i).row<short>(q);
                    short* out_tm7 = bottom_blob_tm.channel(tiles*7+j*nRowBlocks+i).row<short>(q);
                    short* out_tm8 = bottom_blob_tm.channel(tiles*8+j*nRowBlocks+i).row<short>(q);
#if __ARM_NEON
                    int8x8_t _d0, _d1, _d2, _d3, _d4, _d5;
                    int16x8_t _w0, _w1, _w2, _w3, _w4, _w5;
                    int16x8_t _t0, _t1, _t2, _t3, _t4, _t5;
                    int16x8_t _n0, _n1, _n2, _n3, _n4, _n5;

                    // load (only lanes 0..5 of each 8-lane vector are used)
                    _d0 = vld1_s8(r0);
                    _d1 = vld1_s8(r1);
                    _d2 = vld1_s8(r2);
                    _d3 = vld1_s8(r3);
                    _d4 = vld1_s8(r4);
                    _d5 = vld1_s8(r5);

                    int8x8_t _1_n = vdup_n_s8(-1);
                    int8x8_t _2_p = vdup_n_s8(2);
                    int8x8_t _2_n = vdup_n_s8(-2);
                    int8x8_t _4_p = vdup_n_s8(4);
                    int8x8_t _4_n = vdup_n_s8(-4);
                    int8x8_t _5_n = vdup_n_s8(-5);

                    int16x8_t _1_n_s16 = vdupq_n_s16(-1);
                    int16x8_t _2_p_s16 = vdupq_n_s16(2);
                    int16x8_t _2_n_s16 = vdupq_n_s16(-2);
                    int16x8_t _4_p_s16 = vdupq_n_s16(4);
                    int16x8_t _4_n_s16 = vdupq_n_s16(-4);
                    int16x8_t _5_n_s16 = vdupq_n_s16(-5);

                    // w = B_t * d (widening int8 -> int16 multiply-accumulate)
                    _w0 = vmull_s8(_d0, _4_p);
                    _w0 = vmlal_s8(_w0, _d2, _5_n);
                    _w0 = vaddw_s8(_w0, _d4);

                    _w1 = vmull_s8(_d1, _4_n);
                    _w1 = vmlal_s8(_w1, _d2, _4_n);
                    _w1 = vaddw_s8(_w1, _d3);
                    _w1 = vaddw_s8(_w1, _d4);

                    _w2 = vmull_s8(_d1, _4_p);
                    _w2 = vmlal_s8(_w2, _d2, _4_n);
                    _w2 = vmlal_s8(_w2, _d3, _1_n);
                    _w2 = vaddw_s8(_w2, _d4);

                    _w3 = vmull_s8(_d1, _2_n);
                    _w3 = vmlal_s8(_w3, _d2, _1_n);
                    _w3 = vmlal_s8(_w3, _d3, _2_p);
                    _w3 = vaddw_s8(_w3, _d4);

                    _w4 = vmull_s8(_d1, _2_p);
                    _w4 = vmlal_s8(_w4, _d2, _1_n);
                    _w4 = vmlal_s8(_w4, _d3, _2_n);
                    _w4 = vaddw_s8(_w4, _d4);

                    _w5 = vmull_s8(_d1, _4_p);
                    _w5 = vmlal_s8(_w5, _d3, _5_n);
                    _w5 = vaddw_s8(_w5, _d5);

                    // transpose d to d_t (lane-by-lane, GCC vector extension)
                    {
                        _t0[0]=_w0[0]; _t1[0]=_w0[1]; _t2[0]=_w0[2]; _t3[0]=_w0[3]; _t4[0]=_w0[4]; _t5[0]=_w0[5];
                        _t0[1]=_w1[0]; _t1[1]=_w1[1]; _t2[1]=_w1[2]; _t3[1]=_w1[3]; _t4[1]=_w1[4]; _t5[1]=_w1[5];
                        _t0[2]=_w2[0]; _t1[2]=_w2[1]; _t2[2]=_w2[2]; _t3[2]=_w2[3]; _t4[2]=_w2[4]; _t5[2]=_w2[5];
                        _t0[3]=_w3[0]; _t1[3]=_w3[1]; _t2[3]=_w3[2]; _t3[3]=_w3[3]; _t4[3]=_w3[4]; _t5[3]=_w3[5];
                        _t0[4]=_w4[0]; _t1[4]=_w4[1]; _t2[4]=_w4[2]; _t3[4]=_w4[3]; _t4[4]=_w4[4]; _t5[4]=_w4[5];
                        _t0[5]=_w5[0]; _t1[5]=_w5[1]; _t2[5]=_w5[2]; _t3[5]=_w5[3]; _t4[5]=_w5[4]; _t5[5]=_w5[5];
                    }

                    // d = B_t * d_t (second pass along the other dimension)
                    _n0 = vmulq_s16(_t0, _4_p_s16);
                    _n0 = vmlaq_s16(_n0, _t2, _5_n_s16);
                    _n0 = vaddq_s16(_n0, _t4);

                    _n1 = vmulq_s16(_t1, _4_n_s16);
                    _n1 = vmlaq_s16(_n1, _t2, _4_n_s16);
                    _n1 = vaddq_s16(_n1, _t3);
                    _n1 = vaddq_s16(_n1, _t4);

                    _n2 = vmulq_s16(_t1, _4_p_s16);
                    _n2 = vmlaq_s16(_n2, _t2, _4_n_s16);
                    _n2 = vmlaq_s16(_n2, _t3, _1_n_s16);
                    _n2 = vaddq_s16(_n2, _t4);

                    _n3 = vmulq_s16(_t1, _2_n_s16);
                    _n3 = vmlaq_s16(_n3, _t2, _1_n_s16);
                    _n3 = vmlaq_s16(_n3, _t3, _2_p_s16);
                    _n3 = vaddq_s16(_n3, _t4);

                    _n4 = vmulq_s16(_t1, _2_p_s16);
                    _n4 = vmlaq_s16(_n4, _t2, _1_n_s16);
                    _n4 = vmlaq_s16(_n4, _t3, _2_n_s16);
                    _n4 = vaddq_s16(_n4, _t4);

                    _n5 = vmulq_s16(_t1, _4_p_s16);
                    _n5 = vmlaq_s16(_n5, _t3, _5_n_s16);
                    _n5 = vaddq_s16(_n5, _t5);

                    // save to out_tm (36 coefficients split 4-at-a-time into 9 groups)
                    out_tm0[0]=_n0[0];out_tm0[1]=_n0[1];out_tm0[2]=_n0[2];out_tm0[3]=_n0[3];
                    out_tm1[0]=_n0[4];out_tm1[1]=_n0[5];out_tm1[2]=_n1[0];out_tm1[3]=_n1[1];
                    out_tm2[0]=_n1[2];out_tm2[1]=_n1[3];out_tm2[2]=_n1[4];out_tm2[3]=_n1[5];
                    out_tm3[0]=_n2[0];out_tm3[1]=_n2[1];out_tm3[2]=_n2[2];out_tm3[3]=_n2[3];
                    out_tm4[0]=_n2[4];out_tm4[1]=_n2[5];out_tm4[2]=_n3[0];out_tm4[3]=_n3[1];
                    out_tm5[0]=_n3[2];out_tm5[1]=_n3[3];out_tm5[2]=_n3[4];out_tm5[3]=_n3[5];
                    out_tm6[0]=_n4[0];out_tm6[1]=_n4[1];out_tm6[2]=_n4[2];out_tm6[3]=_n4[3];
                    out_tm7[0]=_n4[4];out_tm7[1]=_n4[5];out_tm7[2]=_n5[0];out_tm7[3]=_n5[1];
                    out_tm8[0]=_n5[2];out_tm8[1]=_n5[3];out_tm8[2]=_n5[4];out_tm8[3]=_n5[5];
#else
                    // Scalar fallback: same B_T * d * B transform as the NEON path.
                    short d0[6],d1[6],d2[6],d3[6],d4[6],d5[6];
                    short w0[6],w1[6],w2[6],w3[6],w4[6],w5[6];
                    short t0[6],t1[6],t2[6],t3[6],t4[6],t5[6];
                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                        d4[n] = r4[n];
                        d5[n] = r5[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = 4*d0[n] - 5*d2[n] + d4[n];
                        w1[n] = -4*d1[n] - 4*d2[n] + d3[n] + d4[n];
                        w2[n] = 4*d1[n] - 4*d2[n] - d3[n] + d4[n];
                        w3[n] = -2*d1[n] - d2[n] + 2*d3[n] + d4[n];
                        w4[n] = 2*d1[n] - d2[n] - 2*d3[n] + d4[n];
                        w5[n] = 4*d1[n] - 5*d3[n] + d5[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t4[0]=w0[4]; t5[0]=w0[5];
                        t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t4[1]=w1[4]; t5[1]=w1[5];
                        t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t4[2]=w2[4]; t5[2]=w2[5];
                        t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; t4[3]=w3[4]; t5[3]=w3[5];
                        t0[4]=w4[0]; t1[4]=w4[1]; t2[4]=w4[2]; t3[4]=w4[3]; t4[4]=w4[4]; t5[4]=w4[5];
                        t0[5]=w5[0]; t1[5]=w5[1]; t2[5]=w5[2]; t3[5]=w5[3]; t4[5]=w5[4]; t5[5]=w5[5];
                    }
                    // d = B_t * d_t
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = 4*t0[n] - 5*t2[n] + t4[n];
                        d1[n] = - 4*t1[n] - 4*t2[n] + t3[n] + t4[n];
                        d2[n] = 4*t1[n] - 4*t2[n] - t3[n] + t4[n];
                        d3[n] = - 2*t1[n] - t2[n] + 2*t3[n] + t4[n];
                        d4[n] = 2*t1[n] - t2[n] - 2*t3[n] + t4[n];
                        d5[n] = 4*t1[n] - 5*t3[n] + t5[n];
                    }
                    // save to out_tm
                    {
                        out_tm0[0]=d0[0];out_tm0[1]=d0[1];out_tm0[2]=d0[2];out_tm0[3]=d0[3];
                        out_tm1[0]=d0[4];out_tm1[1]=d0[5];out_tm1[2]=d1[0];out_tm1[3]=d1[1];
                        out_tm2[0]=d1[2];out_tm2[1]=d1[3];out_tm2[2]=d1[4];out_tm2[3]=d1[5];
                        out_tm3[0]=d2[0];out_tm3[1]=d2[1];out_tm3[2]=d2[2];out_tm3[3]=d2[3];
                        out_tm4[0]=d2[4];out_tm4[1]=d2[5];out_tm4[2]=d3[0];out_tm4[3]=d3[1];
                        out_tm5[0]=d3[2];out_tm5[1]=d3[3];out_tm5[2]=d3[4];out_tm5[3]=d3[5];
                        out_tm6[0]=d4[0];out_tm6[1]=d4[1];out_tm6[2]=d4[2];out_tm6[3]=d4[3];
                        out_tm7[0]=d4[4];out_tm7[1]=d4[5];out_tm7[2]=d5[0];out_tm7[3]=d5[1];
                        out_tm8[0]=d5[2];out_tm8[1]=d5[3];out_tm8[2]=d5[4];out_tm8[3]=d5[5];
                    }
#endif // __ARM_NEON
                    // advance to the next tile (tiles overlap by 2 pixels)
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;
                }
            }
        }
    }
    bottom_blob_bordered = Mat();

    // BEGIN dot
    // For each coefficient group r and each tile, accumulate int16 x int16
    // products over all input channels into int32, for 8/4/1 output channels
    // at a time.
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm/6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/6;

        const int tiles = nColBlocks * nRowBlocks;

        top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r=0; r<9; r++)
        {
            int nn_outch = 0;
            int remain_outch_start = 0;

            nn_outch = outch >> 3;
            remain_outch_start = nn_outch << 3;

            for (int pp=0; pp<nn_outch; pp++)
            {
                int
p = pp * 8;

                // 8 output channels at once
                int* output0_tm = top_blob_tm.channel(p);
                int* output1_tm = top_blob_tm.channel(p+1);
                int* output2_tm = top_blob_tm.channel(p+2);
                int* output3_tm = top_blob_tm.channel(p+3);
                int* output4_tm = top_blob_tm.channel(p+4);
                int* output5_tm = top_blob_tm.channel(p+5);
                int* output6_tm = top_blob_tm.channel(p+6);
                int* output7_tm = top_blob_tm.channel(p+7);

                // jump to the 4-int slot of coefficient group r
                output0_tm = output0_tm + r*4;
                output1_tm = output1_tm + r*4;
                output2_tm = output2_tm + r*4;
                output3_tm = output3_tm + r*4;
                output4_tm = output4_tm + r*4;
                output5_tm = output5_tm + r*4;
                output6_tm = output6_tm + r*4;
                output7_tm = output7_tm + r*4;

                for (int i=0; i<tiles; i++)
                {
                    const short* kptr = kernel_tm_test[r].channel(p/8);
                    const short* r0 = bottom_blob_tm.channel(tiles*r+i);
#if __ARM_NEON
#if __aarch64__
                    asm volatile(
                        // inch loop
                        "eor v0.16b, v0.16b, v0.16b \n"
                        "eor v1.16b, v1.16b, v1.16b \n"
                        "eor v2.16b, v2.16b, v2.16b \n"
                        "eor v3.16b, v3.16b, v3.16b \n"
                        "eor v4.16b, v4.16b, v4.16b \n"
                        "eor v5.16b, v5.16b, v5.16b \n"
                        "eor v6.16b, v6.16b, v6.16b \n"
                        "eor v7.16b, v7.16b, v7.16b \n"
                        "mov w4, %w20 \n"
                        "0: \n" // for (int q=0; q<inch; q++)
                        "prfm pldl1keep, [%9, #128] \n"
                        "ld1 {v8.4h}, [%8] \n" // _r0 = vld1_s16(r0);
                        "ld1 {v9.4h, v10.4h}, [%9] \n" // _k01 = vld1q_s16(kptr);
                        "add %9, %9, #16 \n"
                        "ld1 {v11.4h, v12.4h}, [%9] \n" // _k23 = vld1q_s16(kptr+8);
                        "add %9, %9, #16 \n"
                        "ld1 {v13.4h, v14.4h}, [%9] \n" // _k45 = vld1q_s16(kptr+16);
                        "add %9, %9, #16 \n"
                        "ld1 {v15.4h, v16.4h}, [%9] \n" // _k67 = vld1q_s16(kptr+24);
                        "add %8, %8, #8 \n"
                        "add %9, %9, #16 \n"
                        "subs w4, w4, #1 \n"
                        "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
                        "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
                        "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
                        "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
                        "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43)
                        "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53)
                        "smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63)
                        "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73)
                        "bne 0b \n" // end for
                        "st1 {v0.4s}, [%0] \n" // store the result to memory
                        "st1 {v1.4s}, [%1] \n" //
                        "st1 {v2.4s}, [%2] \n" //
                        "st1 {v3.4s}, [%3] \n" //
                        "st1 {v4.4s}, [%4] \n" //
                        "st1 {v5.4s}, [%5] \n" //
                        "st1 {v6.4s}, [%6] \n" //
                        "st1 {v7.4s}, [%7] \n" //
                        : "=r"(output0_tm), // %0
                        "=r"(output1_tm), // %1
                        "=r"(output2_tm), // %2
                        "=r"(output3_tm), // %3
                        "=r"(output4_tm), // %4
                        "=r"(output5_tm), // %5
                        "=r"(output6_tm), // %6
                        "=r"(output7_tm), // %7
                        "=r"(r0), // %8
                        "=r"(kptr) // %9
                        : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20
                        : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16"
                    );
#else
                    asm volatile(
                        // inch loop
                        "vmov.s32 q0, #0 \n"
                        "vmov.s32 q1, #0 \n"
                        "vmov.s32 q2, #0 \n"
                        "vmov.s32 q3, #0 \n"
                        "vmov.s32 q4, #0 \n"
                        "vmov.s32 q5, #0 \n"
                        "vmov.s32 q6, #0 \n"
                        "vmov.s32 q7, #0 \n"
                        "mov r4, %20 \n"
                        "0: \n" // for (int q=0; q<inch; q++)
                        "vld1.s16 {d16}, [%8]! \n" // _r0 = vld1_s16(r0); // input inch0
                        "vld1.s16 {d18-d19}, [%9] \n" // _k01 = vld1q_s16(kptr);
                        "add %9, #16 \n"
                        "vld1.s16 {d20-d21}, [%9] \n" // _k23 = vld1q_s16(kptr+8);
                        "add %9, #16 \n"
                        "vld1.s16 {d22-d23}, [%9] \n" // _k45 = vld1q_s16(kptr+16);
                        "add %9, #16 \n"
                        "vld1.s16 {d24-d25}, [%9] \n" // _k67 = vld1q_s16(kptr+24);
                        "add %9, #16 \n"
                        "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
                        "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
                        "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
                        "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
                        "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43)
                        "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53)
                        "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63)
                        "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73)
                        "subs r4, r4, #1 \n"
                        "bne 0b \n" // end for
                        "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
                        "vst1.s32 {d2-d3}, [%1] \n"
                        "vst1.s32 {d4-d5}, [%2] \n"
                        "vst1.s32 {d6-d7}, [%3] \n"
                        "vst1.s32 {d8-d9}, [%4] \n"
                        "vst1.s32 {d10-d11}, [%5] \n"
                        "vst1.s32 {d12-d13}, [%6] \n"
                        "vst1.s32 {d14-d15}, [%7] \n"
                        : "=r"(output0_tm), // %0
                        "=r"(output1_tm), // %1
                        "=r"(output2_tm), // %2
                        "=r"(output3_tm), // %3
                        "=r"(output4_tm), // %4
                        "=r"(output5_tm), // %5
                        "=r"(output6_tm), // %6
                        "=r"(output7_tm), // %7
                        "=r"(r0), // %8
                        "=r"(kptr) // %9
                        : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20
                        : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12"
                    );
#endif // __aarch64__
#else
                    // Scalar fallback: same reduction as the asm above.
                    int sum0[4] = {0};
                    int sum1[4] = {0};
                    int sum2[4] = {0};
                    int sum3[4] = {0};
                    int sum4[4] = {0};
                    int sum5[4] = {0};
                    int sum6[4] = {0};
                    int sum7[4] = {0};

                    for (int q=0; q<inch; q++)
                    {
                        for (int n=0; n<4; n++)
                        {
                            sum0[n] += (int)r0[n] * kptr[n];
                            sum1[n] += (int)r0[n] * kptr[n+4];
                            sum2[n] += (int)r0[n] * kptr[n+8];
                            sum3[n] += (int)r0[n] * kptr[n+12];
                            sum4[n] += (int)r0[n] * kptr[n+16];
                            sum5[n] += (int)r0[n] * kptr[n+20];
                            sum6[n] += (int)r0[n] * kptr[n+24];
                            sum7[n] += (int)r0[n] * kptr[n+28];
                        }
                        kptr += 32;
                        r0 += 4;
                    }

                    for (int n=0; n<4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                        output4_tm[n] = sum4[n];
                        output5_tm[n] = sum5[n];
                        output6_tm[n] = sum6[n];
                        output7_tm[n] = sum7[n];
                    }
#endif // __ARM_NEON
                    // next tile: each tile row holds 36 ints per output channel
                    output0_tm += 36;
                    output1_tm += 36;
                    output2_tm += 36;
                    output3_tm += 36;
                    output4_tm += 36;
                    output5_tm += 36;
                    output6_tm += 36;
                    output7_tm += 36;
                }
            }

            // 4 output channels at once
            nn_outch = (outch - remain_outch_start) >> 2;

            for (int pp=0; pp<nn_outch; pp++)
            {
                int p = remain_outch_start + pp * 4;

                int* output0_tm = top_blob_tm.channel(p);
                int* output1_tm = top_blob_tm.channel(p+1);
                int* output2_tm = top_blob_tm.channel(p+2);
                int* output3_tm = top_blob_tm.channel(p+3);

                output0_tm = output0_tm + r*4;
                output1_tm = output1_tm + r*4;
                output2_tm = output2_tm + r*4;
                output3_tm = output3_tm + r*4;

                for (int i=0; i<tiles; i++)
                {
                    const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4);
                    const short* r0 = bottom_blob_tm.channel(tiles*r+i);
#if __ARM_NEON
#if __aarch64__
                    asm volatile(
                        // inch loop
                        "eor v0.16b, v0.16b, v0.16b \n"
                        "eor v1.16b, v1.16b, v1.16b \n"
                        "eor v2.16b, v2.16b, v2.16b \n"
                        "eor v3.16b, v3.16b, v3.16b \n"
                        "mov w4, %w12 \n"
                        "0: \n" // for (int q=0; q<inch; q++)
                        "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0
                        "ld1 {v8.4h}, [%4] \n"
                        "ld1 {v9.4h, v10.4h}, [%5] \n" // _k01 = vld1q_s16(kptr);
                        "add %5, %5, #16 \n"
                        "ld1 {v11.4h, v12.4h}, [%5] \n" // _k23 = vld1q_s16(kptr+8);
                        "add %4, %4, #8 \n"
                        "add %5, %5, #16 \n"
                        "subs w4, w4, #1 \n"
                        "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
                        "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
                        "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
                        "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
                        "bne 0b \n" // end for
                        "st1 {v0.4s}, [%0] \n" // store the result to memory
                        "st1 {v1.4s}, [%1] \n" //
                        "st1 {v2.4s}, [%2] \n" //
                        "st1 {v3.4s}, [%3] \n" //
                        : "=r"(output0_tm), // %0
                        "=r"(output1_tm), // %1
                        "=r"(output2_tm), // %2
                        "=r"(output3_tm), // %3
                        "=r"(r0), // %4
                        "=r"(kptr) // %5
                        : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12
                        : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12"
                    );
#else
                    asm volatile(
                        // inch loop
                        "vmov.s32 q0, #0 \n"
                        "vmov.s32 q1, #0 \n"
                        "vmov.s32 q2, #0 \n"
                        "vmov.s32 q3, #0 \n"
                        "mov r4, %12 \n"
                        "0: \n" // for (int q=0; q<inch; q++)
                        "vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0
                        "vld1.s16 {d18-d19}, [%5] \n" // _k01 = vld1q_s16(kptr);
                        "add %5, #16 \n"
                        "vld1.s16 {d20-d21}, [%5] \n" // _k23 = vld1q_s16(kptr+8);
                        "add %5, #16 \n"
                        "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
                        "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
                        "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
                        "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
                        "subs r4, r4, #1 \n"
                        "bne 0b \n" // end for
                        "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
                        "vst1.s32 {d2-d3}, [%1] \n"
                        "vst1.s32 {d4-d5}, [%2] \n"
                        "vst1.s32 {d6-d7}, [%3] \n"
                        : "=r"(output0_tm), // %0
                        "=r"(output1_tm), // %1
                        "=r"(output2_tm), // %2
                        "=r"(output3_tm), // %3
                        "=r"(r0), // %4
                        "=r"(kptr) // %5
                        : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12
                        : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10"
                    );
#endif // __aarch64__
#else
                    int sum0[4] = {0};
                    int sum1[4] = {0};
                    int sum2[4] = {0};
                    int sum3[4] = {0};

                    for (int q=0; q<inch; q++)
                    {
                        for (int n=0; n<4; n++)
                        {
                            sum0[n] += (int)r0[n] * kptr[n];
                            sum1[n] += (int)r0[n] * kptr[n+4];
                            sum2[n] += (int)r0[n] * kptr[n+8];
                            sum3[n] += (int)r0[n] * kptr[n+12];
                        }
                        kptr += 16;
                        r0 += 4;
                    }

                    for (int n=0; n<4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                    }
#endif // __ARM_NEON
                    output0_tm += 36;
                    output1_tm += 36;
                    output2_tm += 36;
                    output3_tm += 36;
                }
            }

            remain_outch_start += nn_outch << 2;

            // remaining single output channels
            for (int p=remain_outch_start; p<outch; p++)
            {
                int* output0_tm = top_blob_tm.channel(p);

                output0_tm = output0_tm + r*4;

                for (int i=0; i<tiles; i++)
                {
                    const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4);
                    const short* r0 = bottom_blob_tm.channel(tiles*r+i);
#if __ARM_NEON
#if __aarch64__
                    asm volatile(
                        // inch loop
                        "eor v0.16b, v0.16b, v0.16b \n"
                        "mov w4, %w6 \n"
                        "0: \n" // for (int q=0; q<inch; q++)
                        "ld1 {v8.4h}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0
                        "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr);
                        "add %1, %1, #8 \n"
                        "add %2, %2, #8 \n"
                        "subs w4, w4, #1 \n"
                        "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
                        "bne 0b \n" // end for
                        "st1 {v0.4s}, [%0] \n" // store the result to memory
                        : "=r"(output0_tm), // %0
                        "=r"(r0), // %1
                        "=r"(kptr) // %2
                        : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6
                        : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9"
                    );
#else
                    asm volatile(
                        // inch loop
                        "vmov.s32 q0, #0 \n"
                        "mov r4, %6 \n"
                        "0: \n" // for (int q=0; q<inch; q++)
                        "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0
                        "add %1, #8 \n"
                        "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr);
                        "add %2, #8 \n"
                        "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
                        "subs r4, r4, #1 \n"
                        "bne 0b \n" // end for
                        "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
                        : "=r"(output0_tm), // %0
                        "=r"(r0), // %1
                        "=r"(kptr) // %2
                        : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6
                        : "cc", "memory", "r4", "q0", "q8", "q9"
                    );
#endif // __aarch64__
#else // __ARM_NEON
                    int sum0[4] = {0};

                    for (int q=0; q<inch; q++)
                    {
                        for (int n=0; n<4; n++)
                        {
                            sum0[n] += (int)r0[n] * kptr[n];
                        }
                        kptr += 4;
                        r0 += 4;
                    }

                    for (int n=0; n<4; n++)
                    {
                        output0_tm[n] = sum0[n];
                    }
#endif // __ARM_NEON
                    output0_tm += 36;
                }
            }

        // Reference (naive) implementation kept for documentation:
        // for (int p=0; p<outch; p++)
        // {
        //     Mat out0_tm =
        // top_blob_tm.channel(p);
        //     const Mat kernel0_tm = kernel_tm.channel(p);
        //     for (int i=0; i<tiles; i++)
        //     {
        //         int* output0_tm = out0_tm.row<int>(i);
        //         int sum0[36] = {0};
        //         for (int q=0; q<inch; q++)
        //         {
        //             const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
        //             const short* k0 = kernel0_tm.row<short>(q);
        //             for (int n=0; n<36; n++)
        //             {
        //                 sum0[n] += (int)r0[n] * k0[n];
        //             }
        //         }
        //         for (int n=0; n<36; n++)
        //         {
        //             output0_tm[n] = sum0[n];
        //         }
        //     }
        // }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    // Each 6x6 accumulated tile is reduced to a 4x4 output patch,
    // Y = A_T * M * A, then descaled by 576 (= 24*24, the G scale factor).
    Mat top_blob_bordered;
    top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    {
        // AT
        // const float itm[4][6] = {
        //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };

        // 0 = r00 + r01 + r02 + r03 + r04
        // 1 = r01 - r02 + 2 * (r03 - r04)
        // 2 = r01 + r02 + 4 * (r03 + r04)
        // 3 = r01 - r02 + 8 * (r03 - r04) + r05

        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm/6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/6;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=0; p<outch; p++)
        {
            int* out_tile = top_blob_tm.channel(p);
            int* outRow0 = top_blob_bordered.channel(p);
            int* outRow1 = outRow0 + outw;
            int* outRow2 = outRow0 + outw * 2;
            int* outRow3 = outRow0 + outw * 3;

            for (int j=0; j<nColBlocks; j++)
            {
                for(int i=0; i<nRowBlocks; i++)
                {
#if __ARM_NEON
                    // 6-wide rows are held as a 4-lane vector plus a 2-lane tail (n suffix)
                    int32x4_t _s0, _s1, _s2, _s3, _s4, _s5;
                    int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n;
                    int32x4_t _w0, _w1, _w2, _w3;
                    int32x2_t _w0n, _w1n, _w2n, _w3n;
                    int32x4_t _d0, _d1, _d2, _d3, _d4, _d5;
                    int32x4_t _o0, _o1, _o2, _o3;
                    // load the 6x6 accumulated tile
                    _s0 = vld1q_s32(out_tile);
                    _s0n = vld1_s32(out_tile+4);
                    _s1 = vld1q_s32(out_tile+6);
                    _s1n = vld1_s32(out_tile+10);
                    _s2 = vld1q_s32(out_tile+12);
                    _s2n = vld1_s32(out_tile+16);
                    _s3 = vld1q_s32(out_tile+18);
                    _s3n = vld1_s32(out_tile+22);
                    _s4 = vld1q_s32(out_tile+24);
                    _s4n = vld1_s32(out_tile+28);
                    _s5 = vld1q_s32(out_tile+30);
                    _s5n = vld1_s32(out_tile+34);
                    // w = A_T * W
                    int32x2_t _tp0 = {-1, 2};
                    int32x2_t _tp1 = {-2, 4};
                    int32x2_t _tp2 = {8, -8};

                    _w0 = vaddq_s32(_s0, _s1);
                    _w0n = vadd_s32(_s0n, _s1n);
                    _w0 = vaddq_s32(_w0, _s2);
                    _w0n = vadd_s32(_w0n, _s2n);
                    _w0 = vaddq_s32(_w0, _s3);
                    _w0n = vadd_s32(_w0n, _s3n);
                    _w0 = vaddq_s32(_w0, _s4);
                    _w0n = vadd_s32(_w0n, _s4n);

                    _w1 = vsubq_s32(_s1, _s2);
                    _w1n = vsub_s32(_s1n, _s2n);
                    _w1 = vmlaq_lane_s32(_w1, _s3, _tp0, 1);
                    _w1n = vmla_lane_s32(_w1n, _s3n, _tp0, 1);
                    _w1 = vmlaq_lane_s32(_w1, _s4, _tp1, 0);
                    _w1n = vmla_lane_s32(_w1n, _s4n, _tp1, 0);

                    _w2 = vaddq_s32(_s1, _s2);
                    _w2n = vadd_s32(_s1n, _s2n);
                    _w2 = vmlaq_lane_s32(_w2, _s3, _tp1, 1);
                    _w2n = vmla_lane_s32(_w2n, _s3n, _tp1, 1);
                    _w2 = vmlaq_lane_s32(_w2, _s4, _tp1, 1);
                    _w2n = vmla_lane_s32(_w2n, _s4n, _tp1, 1);

                    _w3 = vsubq_s32(_s1, _s2);
                    _w3n = vsub_s32(_s1n, _s2n);
                    _w3 = vmlaq_lane_s32(_w3, _s3, _tp2, 0);
                    _w3n = vmla_lane_s32(_w3n, _s3n, _tp2, 0);
                    _w3 = vmlaq_lane_s32(_w3, _s4, _tp2, 1);
                    _w3n = vmla_lane_s32(_w3n, _s4n, _tp2, 1);
                    _w3 = vaddq_s32(_w3, _s5);
                    _w3n = vadd_s32(_w3n, _s5n);
                    // transpose w to w_t (lane-by-lane, GCC vector extension)
                    {
                        _d0[0] = _w0[0]; _d0[1] = _w1[0]; _d0[2] = _w2[0]; _d0[3] = _w3[0];
                        _d1[0] = _w0[1]; _d1[1] = _w1[1]; _d1[2] = _w2[1]; _d1[3] = _w3[1];
                        _d2[0] = _w0[2]; _d2[1] = _w1[2]; _d2[2] = _w2[2]; _d2[3] = _w3[2];
                        _d3[0] = _w0[3]; _d3[1] = _w1[3]; _d3[2] = _w2[3]; _d3[3] = _w3[3];
                        _d4[0] = _w0n[0]; _d4[1] = _w1n[0]; _d4[2] = _w2n[0]; _d4[3] = _w3n[0];
                        _d5[0] = _w0n[1]; _d5[1] = _w1n[1]; _d5[2] = _w2n[1]; _d5[3] = _w3n[1];
                    }
                    // Y = A_T * w_t
                    _o0 = vaddq_s32(_d0, _d1);
                    _o0 = vaddq_s32(_o0, _d2);
                    _o0 = vaddq_s32(_o0, _d3);
                    _o0 = vaddq_s32(_o0, _d4);

                    _o1 = vsubq_s32(_d1, _d2);
                    _o1 = vmlaq_lane_s32(_o1, _d3, _tp0, 1);
                    _o1 = vmlaq_lane_s32(_o1, _d4, _tp1, 0);

                    _o2 = vaddq_s32(_d1, _d2);
                    _o2 = vmlaq_lane_s32(_o2, _d3, _tp1, 1);
                    _o2 = vmlaq_lane_s32(_o2, _d4, _tp1, 1);

                    _o3 = vsubq_s32(_d1, _d2);
                    _o3 = vmlaq_lane_s32(_o3, _d3, _tp2, 0);
                    _o3 = vmlaq_lane_s32(_o3, _d4, _tp2, 1);
                    _o3 = vaddq_s32(_o3, _d5);
                    // save to top blob tm; divide by 576 = 24*24 to undo the
                    // integer scaling applied in the kernel transform
                    for (int n = 0; n < 4; n++)
                    {
                        outRow0[n] = _o0[n] / 576;
                        outRow1[n] = _o1[n] / 576;
                        outRow2[n] = _o2[n] / 576;
                        outRow3[n] = _o3[n] / 576;
                    }
#else
                    // Scalar fallback: same A_T * M * A reduction.
                    int s0[6],s1[6],s2[6],s3[6],s4[6],s5[6];
                    int w0[6],w1[6],w2[6],w3[6];
                    int d0[4],d1[4],d2[4],d3[4],d4[4],d5[4];
                    int o0[4],o1[4],o2[4],o3[4];
                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n+ 6];
                        s2[n] = out_tile[n+12];
                        s3[n] = out_tile[n+18];
                        s4[n] = out_tile[n+24];
                        s5[n] = out_tile[n+30];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
                        w1[n] = s1[n] - s2[n] + 2*s3[n] - 2*s4[n];
                        w2[n] = s1[n] + s2[n] + 4*s3[n] + 4*s4[n];
                        w3[n] = s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + s5[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0];
                        d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1];
                        d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2];
                        d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3];
                        d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4];
                        d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 4; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
                        o1[n] = d1[n] - d2[n] + 2*d3[n] - 2*d4[n];
                        o2[n] = d1[n] + d2[n] + 4*d3[n] + 4*d4[n];
                        o3[n] = d1[n] - d2[n] + 8*d3[n] - 8*d4[n] + d5[n];
                    }
                    // save to top blob tm; divide by 576 = 24*24 (kernel scale)
                    for (int n = 0; n < 4; n++)
                    {
                        outRow0[n] = o0[n] / 576;
                        outRow1[n] = o1[n] / 576;
                        outRow2[n] = o2[n] / 576;
                        outRow3[n] = o3[n] / 576;
                    }
#endif // __ARM_NEON
                    out_tile += 36;

                    outRow0 += 4;
                    outRow1 += 4;
                    outRow2 += 4;
                    outRow3 += 4;
                }

                outRow0 += outw * 3;
                outRow1 += outw * 3;
                outRow2 += outw * 3;
                outRow3 += outw * 3;
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads);
}

static void
conv3x3s1_winograd43_dequant_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Mat &_bias, std::vector<float> scales_dequant, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; // pad to 4n+2, winograd F(4,3) Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4, inch, tiles*9, 2u, opt.workspace_allocator); // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q=0; q<inch; q++) { const signed char* img = bottom_blob_bordered.channel(q); for (int j = 0; j < nColBlocks; j++) { const signed char* r0 = img + w * j * 4; const signed char* r1 = r0 + w; const signed char* r2 = r1 + w; const signed char* r3 = r2 + w; const signed char* r4 = r3 + w; const signed char* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { short* out_tm0 = bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row<short>(q); short* out_tm1 = 
bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row<short>(q); short* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row<short>(q); short* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row<short>(q); short* out_tm4 = bottom_blob_tm.channel(tiles*4+j*nRowBlocks+i).row<short>(q); short* out_tm5 = bottom_blob_tm.channel(tiles*5+j*nRowBlocks+i).row<short>(q); short* out_tm6 = bottom_blob_tm.channel(tiles*6+j*nRowBlocks+i).row<short>(q); short* out_tm7 = bottom_blob_tm.channel(tiles*7+j*nRowBlocks+i).row<short>(q); short* out_tm8 = bottom_blob_tm.channel(tiles*8+j*nRowBlocks+i).row<short>(q); #if __ARM_NEON int8x8_t _d0, _d1, _d2, _d3, _d4, _d5; int16x8_t _w0, _w1, _w2, _w3, _w4, _w5; int16x8_t _t0, _t1, _t2, _t3, _t4, _t5; int16x8_t _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = vld1_s8(r0); _d1 = vld1_s8(r1); _d2 = vld1_s8(r2); _d3 = vld1_s8(r3); _d4 = vld1_s8(r4); _d5 = vld1_s8(r5); int8x8_t _1_n = vdup_n_s8(-1); int8x8_t _2_p = vdup_n_s8(2); int8x8_t _2_n = vdup_n_s8(-2); int8x8_t _4_p = vdup_n_s8(4); int8x8_t _4_n = vdup_n_s8(-4); int8x8_t _5_n = vdup_n_s8(-5); int16x8_t _1_n_s16 = vdupq_n_s16(-1); int16x8_t _2_p_s16 = vdupq_n_s16(2); int16x8_t _2_n_s16 = vdupq_n_s16(-2); int16x8_t _4_p_s16 = vdupq_n_s16(4); int16x8_t _4_n_s16 = vdupq_n_s16(-4); int16x8_t _5_n_s16 = vdupq_n_s16(-5); // w = B_t * d _w0 = vmull_s8(_d0, _4_p); _w0 = vmlal_s8(_w0, _d2, _5_n); _w0 = vaddw_s8(_w0, _d4); _w1 = vmull_s8(_d1, _4_n); _w1 = vmlal_s8(_w1, _d2, _4_n); _w1 = vaddw_s8(_w1, _d3); _w1 = vaddw_s8(_w1, _d4); _w2 = vmull_s8(_d1, _4_p); _w2 = vmlal_s8(_w2, _d2, _4_n); _w2 = vmlal_s8(_w2, _d3, _1_n); _w2 = vaddw_s8(_w2, _d4); _w3 = vmull_s8(_d1, _2_n); _w3 = vmlal_s8(_w3, _d2, _1_n); _w3 = vmlal_s8(_w3, _d3, _2_p); _w3 = vaddw_s8(_w3, _d4); _w4 = vmull_s8(_d1, _2_p); _w4 = vmlal_s8(_w4, _d2, _1_n); _w4 = vmlal_s8(_w4, _d3, _2_n); _w4 = vaddw_s8(_w4, _d4); _w5 = vmull_s8(_d1, _4_p); _w5 = vmlal_s8(_w5, _d3, _5_n); _w5 = vaddw_s8(_w5, _d5); // transpose d to d_t { 
_t0[0]=_w0[0]; _t1[0]=_w0[1]; _t2[0]=_w0[2]; _t3[0]=_w0[3]; _t4[0]=_w0[4]; _t5[0]=_w0[5]; _t0[1]=_w1[0]; _t1[1]=_w1[1]; _t2[1]=_w1[2]; _t3[1]=_w1[3]; _t4[1]=_w1[4]; _t5[1]=_w1[5]; _t0[2]=_w2[0]; _t1[2]=_w2[1]; _t2[2]=_w2[2]; _t3[2]=_w2[3]; _t4[2]=_w2[4]; _t5[2]=_w2[5]; _t0[3]=_w3[0]; _t1[3]=_w3[1]; _t2[3]=_w3[2]; _t3[3]=_w3[3]; _t4[3]=_w3[4]; _t5[3]=_w3[5]; _t0[4]=_w4[0]; _t1[4]=_w4[1]; _t2[4]=_w4[2]; _t3[4]=_w4[3]; _t4[4]=_w4[4]; _t5[4]=_w4[5]; _t0[5]=_w5[0]; _t1[5]=_w5[1]; _t2[5]=_w5[2]; _t3[5]=_w5[3]; _t4[5]=_w5[4]; _t5[5]=_w5[5]; } // d = B_t * d_t _n0 = vmulq_s16(_t0, _4_p_s16); _n0 = vmlaq_s16(_n0, _t2, _5_n_s16); _n0 = vaddq_s16(_n0, _t4); _n1 = vmulq_s16(_t1, _4_n_s16); _n1 = vmlaq_s16(_n1, _t2, _4_n_s16); _n1 = vaddq_s16(_n1, _t3); _n1 = vaddq_s16(_n1, _t4); _n2 = vmulq_s16(_t1, _4_p_s16); _n2 = vmlaq_s16(_n2, _t2, _4_n_s16); _n2 = vmlaq_s16(_n2, _t3, _1_n_s16); _n2 = vaddq_s16(_n2, _t4); _n3 = vmulq_s16(_t1, _2_n_s16); _n3 = vmlaq_s16(_n3, _t2, _1_n_s16); _n3 = vmlaq_s16(_n3, _t3, _2_p_s16); _n3 = vaddq_s16(_n3, _t4); _n4 = vmulq_s16(_t1, _2_p_s16); _n4 = vmlaq_s16(_n4, _t2, _1_n_s16); _n4 = vmlaq_s16(_n4, _t3, _2_n_s16); _n4 = vaddq_s16(_n4, _t4); _n5 = vmulq_s16(_t1, _4_p_s16); _n5 = vmlaq_s16(_n5, _t3, _5_n_s16); _n5 = vaddq_s16(_n5, _t5); // save to out_tm out_tm0[0]=_n0[0];out_tm0[1]=_n0[1];out_tm0[2]=_n0[2];out_tm0[3]=_n0[3]; out_tm1[0]=_n0[4];out_tm1[1]=_n0[5];out_tm1[2]=_n1[0];out_tm1[3]=_n1[1]; out_tm2[0]=_n1[2];out_tm2[1]=_n1[3];out_tm2[2]=_n1[4];out_tm2[3]=_n1[5]; out_tm3[0]=_n2[0];out_tm3[1]=_n2[1];out_tm3[2]=_n2[2];out_tm3[3]=_n2[3]; out_tm4[0]=_n2[4];out_tm4[1]=_n2[5];out_tm4[2]=_n3[0];out_tm4[3]=_n3[1]; out_tm5[0]=_n3[2];out_tm5[1]=_n3[3];out_tm5[2]=_n3[4];out_tm5[3]=_n3[5]; out_tm6[0]=_n4[0];out_tm6[1]=_n4[1];out_tm6[2]=_n4[2];out_tm6[3]=_n4[3]; out_tm7[0]=_n4[4];out_tm7[1]=_n4[5];out_tm7[2]=_n5[0];out_tm7[3]=_n5[1]; out_tm8[0]=_n5[2];out_tm8[1]=_n5[3];out_tm8[2]=_n5[4];out_tm8[3]=_n5[5]; #else short d0[6],d1[6],d2[6],d3[6],d4[6],d5[6]; 
short w0[6],w1[6],w2[6],w3[6],w4[6],w5[6]; short t0[6],t1[6],t2[6],t3[6],t4[6],t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] = 4*d0[n] - 5*d2[n] + d4[n]; w1[n] = -4*d1[n] - 4*d2[n] + d3[n] + d4[n]; w2[n] = 4*d1[n] - 4*d2[n] - d3[n] + d4[n]; w3[n] = -2*d1[n] - d2[n] + 2*d3[n] + d4[n]; w4[n] = 2*d1[n] - d2[n] - 2*d3[n] + d4[n]; w5[n] = 4*d1[n] - 5*d3[n] + d5[n]; } // transpose d to d_t { t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t4[0]=w0[4]; t5[0]=w0[5]; t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t4[1]=w1[4]; t5[1]=w1[5]; t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t4[2]=w2[4]; t5[2]=w2[5]; t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; t4[3]=w3[4]; t5[3]=w3[5]; t0[4]=w4[0]; t1[4]=w4[1]; t2[4]=w4[2]; t3[4]=w4[3]; t4[4]=w4[4]; t5[4]=w4[5]; t0[5]=w5[0]; t1[5]=w5[1]; t2[5]=w5[2]; t3[5]=w5[3]; t4[5]=w5[4]; t5[5]=w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4*t0[n] - 5*t2[n] + t4[n]; d1[n] = - 4*t1[n] - 4*t2[n] + t3[n] + t4[n]; d2[n] = 4*t1[n] - 4*t2[n] - t3[n] + t4[n]; d3[n] = - 2*t1[n] - t2[n] + 2*t3[n] + t4[n]; d4[n] = 2*t1[n] - t2[n] - 2*t3[n] + t4[n]; d5[n] = 4*t1[n] - 5*t3[n] + t5[n]; } // save to out_tm { out_tm0[0]=d0[0];out_tm0[1]=d0[1];out_tm0[2]=d0[2];out_tm0[3]=d0[3]; out_tm1[0]=d0[4];out_tm1[1]=d0[5];out_tm1[2]=d1[0];out_tm1[3]=d1[1]; out_tm2[0]=d1[2];out_tm2[1]=d1[3];out_tm2[2]=d1[4];out_tm2[3]=d1[5]; out_tm3[0]=d2[0];out_tm3[1]=d2[1];out_tm3[2]=d2[2];out_tm3[3]=d2[3]; out_tm4[0]=d2[4];out_tm4[1]=d2[5];out_tm4[2]=d3[0];out_tm4[3]=d3[1]; out_tm5[0]=d3[2];out_tm5[1]=d3[3];out_tm5[2]=d3[4];out_tm5[3]=d3[5]; out_tm6[0]=d4[0];out_tm6[1]=d4[1];out_tm6[2]=d4[2];out_tm6[3]=d4[3]; out_tm7[0]=d4[4];out_tm7[1]=d4[5];out_tm7[2]=d5[0];out_tm7[3]=d5[1]; out_tm8[0]=d5[2];out_tm8[1]=d5[3];out_tm8[2]=d5[4];out_tm8[3]=d5[5]; } #endif // __ARM_NEON r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 
+= 4; r5 += 4; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r=0; r<9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); int* output4_tm = top_blob_tm.channel(p+4); int* output5_tm = top_blob_tm.channel(p+5); int* output6_tm = top_blob_tm.channel(p+6); int* output7_tm = top_blob_tm.channel(p+7); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; output4_tm = output4_tm + r*4; output5_tm = output5_tm + r*4; output6_tm = output6_tm + r*4; output7_tm = output7_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "mov w4, %w20 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0); "ld1 {v8.4h}, [%8] \n" "ld1 {v9.4h, v10.4h}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, %9, #16 \n" "ld1 {v11.4h, v12.4h}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, %9, #16 \n" "ld1 {v13.4h, v14.4h}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, %9, #16 \n" "ld1 
{v15.4h, v16.4h}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %8, %8, #8 \n" "add %9, %9, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43) "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53) "smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63) "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // "st1 {v4.4s}, [%4] \n" // "st1 {v5.4s}, [%5] \n" // "st1 {v6.4s}, [%6] \n" // "st1 {v7.4s}, [%7] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "vmov.s32 q4, #0 \n" "vmov.s32 q5, #0 \n" "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "mov r4, %20 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%8]! 
\n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%9] \n" // _k01 = vld1q_s16(kptr); "add %9, #16 \n" "vld1.s16 {d20-d21}, [%9] \n" // _k23 = vld1q_s16(kptr+8); "add %9, #16 \n" "vld1.s16 {d22-d23}, [%9] \n" // _k45 = vld1q_s16(kptr+16); "add %9, #16 \n" "vld1.s16 {d24-d25}, [%9] \n" // _k67 = vld1q_s16(kptr+24); "add %9, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43) "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53) "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63) "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" "vst1.s32 {d8-d9}, [%4] \n" "vst1.s32 {d10-d11}, [%5] \n" "vst1.s32 {d12-d13}, [%6] \n" "vst1.s32 {d14-d15}, [%7] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(r0), // %8 "=r"(kptr) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(r0), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int sum4[4] = {0}; int sum5[4] = {0}; int sum6[4] = {0}; int sum7[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += 
(int)r0[n] * kptr[n+12]; sum4[n] += (int)r0[n] * kptr[n+16]; sum5[n] += (int)r0[n] * kptr[n+20]; sum6[n] += (int)r0[n] * kptr[n+24]; sum7[n] += (int)r0[n] * kptr[n+28]; } kptr += 32; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p+1); int* output2_tm = top_blob_tm.channel(p+2); int* output3_tm = top_blob_tm.channel(p+3); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "mov w4, %w12 \n" "0: \n" // for (int q=0; q<inch; q++) "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v8.4h}, [%4] \n" "ld1 {v9.4h, v10.4h}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, %5, #16 \n" "ld1 {v11.4h, v12.4h}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %4, %4, #8 \n" "add %5, %5, #16 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13) "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23) "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // 
store the result to memory "st1 {v1.4s}, [%1] \n" // "st1 {v2.4s}, [%2] \n" // "st1 {v3.4s}, [%3] \n" // : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "vmov.s32 q1, #0 \n" "vmov.s32 q2, #0 \n" "vmov.s32 q3, #0 \n" "mov r4, %12 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0 "vld1.s16 {d18-d19}, [%5] \n" // _k01 = vld1q_s16(kptr); "add %5, #16 \n" "vld1.s16 {d20-d21}, [%5] \n" // _k23 = vld1q_s16(kptr+8); "add %5, #16 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13) "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23) "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory "vst1.s32 {d2-d3}, [%1] \n" "vst1.s32 {d4-d5}, [%2] \n" "vst1.s32 {d6-d7}, [%3] \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(kptr) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10" ); #endif // __aarch64__ #else int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; sum1[n] += (int)r0[n] * kptr[n+4]; sum2[n] += (int)r0[n] * kptr[n+8]; sum3[n] += (int)r0[n] * kptr[n+12]; } kptr += 16; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; 
output3_tm[n] = sum3[n]; } #endif // __ARM_NEON output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p=remain_outch_start; p<outch; p++) { int* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r*4; for (int i=0; i<tiles; i++) { const short* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4); const short* r0 = bottom_blob_tm.channel(tiles*r+i); #if __ARM_NEON #if __aarch64__ asm volatile( // inch loop "eor v0.16b, v0.16b, v0.16b \n" "mov w4, %w6 \n" "0: \n" // for (int q=0; q<inch; q++) "ld1 {v8.4h}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %1, %1, #8 \n" "add %2, %2, #8 \n" "subs w4, w4, #1 \n" "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03) "bne 0b \n" // end for "st1 {v0.4s}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9" ); #else asm volatile( // inch loop "vmov.s32 q0, #0 \n" "mov r4, %6 \n" "0: \n" // for (int q=0; q<inch; q++) "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0 "add %1, #8 \n" "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr); "add %2, #8 \n" "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03) "subs r4, r4, #1 \n" "bne 0b \n" // end for "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory : "=r"(output0_tm), // %0 "=r"(r0), // %1 "=r"(kptr) // %2 : "0"(output0_tm), "1"(r0), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q8", "q9" ); #endif // __aarch64__ #else // __ARM_NEON int sum0[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; } #endif // __ARM_NEON output0_tm += 36; } } // for (int p=0; p<outch; p++) // { // Mat out0_tm = 
top_blob_tm.channel(p); // const Mat kernel0_tm = kernel_tm.channel(p); // for (int i=0; i<tiles; i++) // { // int* output0_tm = out0_tm.row<int>(i); // int sum0[36] = {0}; // for (int q=0; q<inch; q++) // { // const short* r0 = bottom_blob_tm.channel(q).row<short>(i); // const short* k0 = kernel0_tm.row<short>(q); // for (int n=0; n<36; n++) // { // sum0[n] += (int)r0[n] * k0[n]; // } // } // for (int n=0; n<36; n++) // { // output0_tm[n] = sum0[n]; // } // } // } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { int* out_tile = top_blob_tm.channel(p); float* outRow0 = top_blob_bordered.channel(p); float* outRow1 = outRow0 + outw; float* outRow2 = outRow0 + outw * 2; float* outRow3 = outRow0 + outw * 3; const float bias0 = bias ? 
bias[p] : 0.f; const float scale_dequant0 = scales_dequant[p]; const float scale0 = scale_dequant0 / 576.0; for (int j=0; j<nColBlocks; j++) { for(int i=0; i<nRowBlocks; i++) { #if __ARM_NEON int32x4_t _s0, _s1, _s2, _s3, _s4, _s5; int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n; int32x4_t _w0, _w1, _w2, _w3; int32x2_t _w0n, _w1n, _w2n, _w3n; int32x4_t _d0, _d1, _d2, _d3, _d4, _d5; int32x4_t _o0, _o1, _o2, _o3; // load _s0 = vld1q_s32(out_tile); _s0n = vld1_s32(out_tile+4); _s1 = vld1q_s32(out_tile+6); _s1n = vld1_s32(out_tile+10); _s2 = vld1q_s32(out_tile+12); _s2n = vld1_s32(out_tile+16); _s3 = vld1q_s32(out_tile+18); _s3n = vld1_s32(out_tile+22); _s4 = vld1q_s32(out_tile+24); _s4n = vld1_s32(out_tile+28); _s5 = vld1q_s32(out_tile+30); _s5n = vld1_s32(out_tile+34); // w = A_T * W int32x2_t _tp0 = {-1, 2}; int32x2_t _tp1 = {-2, 4}; int32x2_t _tp2 = {8, -8}; _w0 = vaddq_s32(_s0, _s1); _w0n = vadd_s32(_s0n, _s1n); _w0 = vaddq_s32(_w0, _s2); _w0n = vadd_s32(_w0n, _s2n); _w0 = vaddq_s32(_w0, _s3); _w0n = vadd_s32(_w0n, _s3n); _w0 = vaddq_s32(_w0, _s4); _w0n = vadd_s32(_w0n, _s4n); _w1 = vsubq_s32(_s1, _s2); _w1n = vsub_s32(_s1n, _s2n); _w1 = vmlaq_lane_s32(_w1, _s3, _tp0, 1); _w1n = vmla_lane_s32(_w1n, _s3n, _tp0, 1); _w1 = vmlaq_lane_s32(_w1, _s4, _tp1, 0); _w1n = vmla_lane_s32(_w1n, _s4n, _tp1, 0); _w2 = vaddq_s32(_s1, _s2); _w2n = vadd_s32(_s1n, _s2n); _w2 = vmlaq_lane_s32(_w2, _s3, _tp1, 1); _w2n = vmla_lane_s32(_w2n, _s3n, _tp1, 1); _w2 = vmlaq_lane_s32(_w2, _s4, _tp1, 1); _w2n = vmla_lane_s32(_w2n, _s4n, _tp1, 1); _w3 = vsubq_s32(_s1, _s2); _w3n = vsub_s32(_s1n, _s2n); _w3 = vmlaq_lane_s32(_w3, _s3, _tp2, 0); _w3n = vmla_lane_s32(_w3n, _s3n, _tp2, 0); _w3 = vmlaq_lane_s32(_w3, _s4, _tp2, 1); _w3n = vmla_lane_s32(_w3n, _s4n, _tp2, 1); _w3 = vaddq_s32(_w3, _s5); _w3n = vadd_s32(_w3n, _s5n); // transpose w to w_t { _d0[0] = _w0[0]; _d0[1] = _w1[0]; _d0[2] = _w2[0]; _d0[3] = _w3[0]; _d1[0] = _w0[1]; _d1[1] = _w1[1]; _d1[2] = _w2[1]; _d1[3] = _w3[1]; _d2[0] = _w0[2]; 
_d2[1] = _w1[2]; _d2[2] = _w2[2]; _d2[3] = _w3[2]; _d3[0] = _w0[3]; _d3[1] = _w1[3]; _d3[2] = _w2[3]; _d3[3] = _w3[3]; _d4[0] = _w0n[0]; _d4[1] = _w1n[0]; _d4[2] = _w2n[0]; _d4[3] = _w3n[0]; _d5[0] = _w0n[1]; _d5[1] = _w1n[1]; _d5[2] = _w2n[1]; _d5[3] = _w3n[1]; } // Y = A_T * w_t _o0 = vaddq_s32(_d0, _d1); _o0 = vaddq_s32(_o0, _d2); _o0 = vaddq_s32(_o0, _d3); _o0 = vaddq_s32(_o0, _d4); _o1 = vsubq_s32(_d1, _d2); _o1 = vmlaq_lane_s32(_o1, _d3, _tp0, 1); _o1 = vmlaq_lane_s32(_o1, _d4, _tp1, 0); _o2 = vaddq_s32(_d1, _d2); _o2 = vmlaq_lane_s32(_o2, _d3, _tp1, 1); _o2 = vmlaq_lane_s32(_o2, _d4, _tp1, 1); _o3 = vsubq_s32(_d1, _d2); _o3 = vmlaq_lane_s32(_o3, _d3, _tp2, 0); _o3 = vmlaq_lane_s32(_o3, _d4, _tp2, 1); _o3 = vaddq_s32(_o3, _d5); // save to top blob tm float32x4_t _scale0 = vdupq_n_f32(scale0); float32x4_t _out0_f32 = vdupq_n_f32(bias0); float32x4_t _out1_f32 = vdupq_n_f32(bias0); float32x4_t _out2_f32 = vdupq_n_f32(bias0); float32x4_t _out3_f32 = vdupq_n_f32(bias0); _out0_f32 = vmlaq_f32(_out0_f32, vcvtq_f32_s32(_o0), _scale0); _out1_f32 = vmlaq_f32(_out1_f32, vcvtq_f32_s32(_o1), _scale0); _out2_f32 = vmlaq_f32(_out2_f32, vcvtq_f32_s32(_o2), _scale0); _out3_f32 = vmlaq_f32(_out3_f32, vcvtq_f32_s32(_o3), _scale0); vst1q_f32(outRow0, _out0_f32); vst1q_f32(outRow1, _out1_f32); vst1q_f32(outRow2, _out2_f32); vst1q_f32(outRow3, _out3_f32); #else int s0[6],s1[6],s2[6],s3[6],s4[6],s5[6]; int w0[6],w1[6],w2[6],w3[6]; int d0[4],d1[4],d2[4],d3[4],d4[4],d5[4]; int o0[4],o1[4],o2[4],o3[4]; // load for (int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n+ 6]; s2[n] = out_tile[n+12]; s3[n] = out_tile[n+18]; s4[n] = out_tile[n+24]; s5[n] = out_tile[n+30]; } // w = A_T * W for (int n = 0; n < 6; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2*s3[n] - 2*s4[n]; w2[n] = s1[n] + s2[n] + 4*s3[n] + 4*s4[n]; w3[n] = s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + s5[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = 
w3[0];
                        // (continued) transpose the 4x6 product w = A_T * W into
                        // 4-wide columns d0..d5 so the second A_T multiply can
                        // run row-wise over contiguous data
                        d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1];
                        d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2];
                        d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3];
                        d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4];
                        d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5];
                    }
                    // Y = A_T * w_t : second half of the Winograd output transform,
                    // reducing the transposed columns d0..d5 to the 4x4 output tile o0..o3
                    for (int n = 0; n < 4; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
                        o1[n] = d1[n] - d2[n] + 2*d3[n] - 2*d4[n];
                        o2[n] = d1[n] + d2[n] + 4*d3[n] + 4*d4[n];
                        o3[n] = d1[n] - d2[n] + 8*d3[n] - 8*d4[n] + d5[n];
                    }
                    // save to top blob tm: dequantize the int32 accumulators to float
                    // (scale0 = scale_dequant0 / 576.0 already folds in the Winograd
                    // transform normalization) and add the per-channel bias
                    for (int n = 0; n < 4; n++)
                    {
                        outRow0[n] = (float)o0[n] * scale0 + bias0;
                        outRow1[n] = (float)o1[n] * scale0 + bias0;
                        outRow2[n] = (float)o2[n] * scale0 + bias0;
                        outRow3[n] = (float)o3[n] * scale0 + bias0;
                    }
#endif // __ARM_NEON
                    // step to the next tile: 36 ints consumed per 6x6 input tile,
                    // 4 floats written per output row of the 4x4 output tile
                    out_tile += 36;
                    outRow0 += 4;
                    outRow1 += 4;
                    outRow2 += 4;
                    outRow3 += 4;
                }
                // each block row emitted 4 output rows; jump the row pointers
                // past the 3 additional rows already written
                outRow0 += outw * 3;
                outRow1 += outw * 3;
                outRow2 += outw * 3;
                outRow3 += outw * 3;
            }
        }
    }
    // END transform output

    // cut result pad: crop the 4-aligned bordered output down to the caller's
    // requested top_blob dimensions
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads);
}

// Repack the raw 3x3 int8 kernel for conv3x3s2_packed_int8_neon.
// Groups of 8 output channels are interleaved so that, for each input channel q
// and tap k, the 8 channels' weights are contiguous (ktmp[0..7]) — this matches
// the 8-channel NEON accumulation in the packed conv. Leftover channels
// (outch % 8) are each stored flat in their own Mat channel.
//   _kernel   : raw weights, outch * inch * 9 signed bytes
//   kernel_tm : destination, created here as (8*9) x inch x (outch/8 + outch%8),
//               1 byte per element
static void conv3x3s2_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(8*9, inch, outch/8 + outch%8, (size_t)1u);

    const signed char* kernel = _kernel;

    int p=0;
    for (; p+7<outch; p+=8)
    {
        // 8 consecutive output channels, each holding inch*9 weights
        const signed char* k0 = kernel + (p+0)*inch*9;
        const signed char* k1 = kernel + (p+1)*inch*9;
        const signed char* k2 = kernel + (p+2)*inch*9;
        const signed char* k3 = kernel + (p+3)*inch*9;
        const signed char* k4 = kernel + (p+4)*inch*9;
        const signed char* k5 = kernel + (p+5)*inch*9;
        const signed char* k6 = kernel + (p+6)*inch*9;
        const signed char* k7 = kernel + (p+7)*inch*9;

        signed char* ktmp = kernel_tm.channel(p/8);

        for (int q=0; q<inch; q++)
        {
            for (int k=0; k<9; k++)
            {
                // interleave the k-th tap of all 8 channels
                ktmp[0] = k0[k];
                ktmp[1] = k1[k];
                ktmp[2] = k2[k];
                ktmp[3] = k3[k];
                ktmp[4] = k4[k];
                ktmp[5] = k5[k];
                ktmp[6] = k6[k];
ktmp[7] = k7[k]; ktmp += 8; } k0 += 9; k1 += 9; k2 += 9; k3 += 9; k4 += 9; k5 += 9; k6 += 9; k7 += 9; } } for (; p<outch; p++) { const signed char* k0 = kernel + (p+0)*inch*9; signed char* ktmp = kernel_tm.channel(p/8 + p%8); for (int q=0; q<inch; q++) { for (int k=0; k<9; k++) { ktmp[k] = k0[k]; } ktmp += 9; k0 += 9; } } } static void conv3x3s2_packed_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2*outw + w; int nn_outch = outch >> 3; int remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; Mat out0 = top_blob.channel(p+0); Mat out1 = top_blob.channel(p+1); Mat out2 = top_blob.channel(p+2); Mat out3 = top_blob.channel(p+3); Mat out4 = top_blob.channel(p+4); Mat out5 = top_blob.channel(p+5); Mat out6 = top_blob.channel(p+6); Mat out7 = top_blob.channel(p+7); out0.fill(0); out1.fill(0); out2.fill(0); out3.fill(0); out4.fill(0); out5.fill(0); out6.fill(0); out7.fill(0); const signed char* ktmp = _kernel.channel(p/8); for (int q=0; q<inch; q++) { int* outptr0 = out0; int* outptr1 = out1; int* outptr2 = out2; int* outptr3 = out3; int* outptr4 = out4; int* outptr5 = out5; int* outptr6 = out6; int* outptr7 = out7; const signed char* img0 = bottom_blob.channel(q); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w*2; int i = 0; for (; i < outh; i++) { #if __ARM_NEON #if __aarch64__ int nn = outw >> 3; int remain = outw & 7; #else int nn = outw >> 2; int remain = outw & 3; #endif // __aarch64__ #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%9], #16 \n"//r0-r2 "ld2 {v5.8b, v6.8b}, [%9] \n" "ld1 {v8.4s, v9.4s}, [%1] 
\n"//out0 "ld1 {v10.4s, v11.4s}, [%2] \n"//out1 "ld1 {v12.4s, v13.4s}, [%3] \n"//out2 "ld1 {v14.4s, v15.4s}, [%4] \n"//out3 "ld1 {v16.4s, v17.4s}, [%5] \n"//out4 "ld1 {v18.4s, v19.4s}, [%6] \n"//out5 "ld1 {v20.4s, v21.4s}, [%7] \n"//out6 "ld1 {v22.4s, v23.4s}, [%8] \n"//out7 "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k00-k70) "sshll v1.8h, v1.8b, #0 \n"//(k01-k71) "sshll v2.8h, v2.8b, #0 \n"//(k02-k72) "sshll v3.8h, v3.8b, #0 \n"// r0 "sshll v4.8h, v4.8b, #0 \n"// r1 "sshll v7.8h, v7.8b, #0 \n"// r2 // r0 "smlal v8.4s, v3.4h, v0.h[0] \n"// out0 += (r00-r07)*k00 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n"// out1 += (r00-r07)*k10 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n"// out2 += (r00-r07)*k20 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n"// out3 += (r00-r07)*k30 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n"// out4 += (r00-r07)*k40 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n"// out5 += (r00-r07)*k50 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n"// out6 += (r00-r07)*k60 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n"// out7 += (r00-r07)*k70 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r1 "smlal v8.4s, v4.4h, v1.h[0] \n"// out0 += (r10-r17)*k01 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal v10.4s, v4.4h, v1.h[1] \n"// out1 += (r10-r17)*k11 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n"// out2 += (r10-r17)*k21 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n"// out3 += (r10-r17)*k31 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n"// out4 += (r10-r17)*k41 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n"// out5 += (r10-r17)*k51 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n"// out6 += (r10-r17)*k61 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n"// out7 += (r10-r17)*k71 "smlal2 
v23.4s, v4.8h, v1.h[7] \n" // r2 "smlal v8.4s, v7.4h, v2.h[0] \n"// out0 += (r20-r27)*k02 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n"// out1 += (r20-r27)*k12 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n"// out2 += (r20-r27)*k22 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n"// out3 += (r20-r27)*k32 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n"// out4 += (r20-r27)*k42 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n"// out5 += (r20-r27)*k52 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n"// out6 += (r20-r27)*k62 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n"// out7 += (r20-r27)*k72 "smlal2 v23.4s, v7.8h, v2.h[7] \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%10], #16 \n"//r3-r5 "ld2 {v5.8b, v6.8b}, [%10] \n" "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k03-k73) "sshll v1.8h, v1.8b, #0 \n"//(k04-k74) "sshll v2.8h, v2.8b, #0 \n"//(k05-k75) "sshll v3.8h, v3.8b, #0 \n"// r3 "sshll v4.8h, v4.8b, #0 \n"// r4 "sshll v7.8h, v7.8b, #0 \n"// r5 // r3 "smlal v8.4s, v3.4h, v0.h[0] \n"// out0 += (r30-r37)*k03 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n"// out1 += (r30-r37)*k13 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n"// out2 += (r30-r37)*k23 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n"// out3 += (r30-r37)*k33 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n"// out4 += (r30-r37)*k43 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n"// out5 += (r30-r37)*k53 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n"// out6 += (r30-r37)*k63 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n"// out7 += (r30-r37)*k73 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r4 "smlal v8.4s, v4.4h, v1.h[0] \n"// out0 += (r40-r47)*k04 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal 
v10.4s, v4.4h, v1.h[1] \n"// out1 += (r40-r47)*k14 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n"// out2 += (r40-r47)*k24 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n"// out3 += (r40-r47)*k34 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n"// out4 += (r40-r47)*k44 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n"// out5 += (r40-r47)*k54 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n"// out6 += (r40-r47)*k64 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n"// out7 += (r40-r47)*k74 "smlal2 v23.4s, v4.8h, v1.h[7] \n" // r5 "smlal v8.4s, v7.4h, v2.h[0] \n"// out0 += (r50-r57)*k05 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n"// out1 += (r50-r57)*k15 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n"// out2 += (r50-r57)*k25 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n"// out3 += (r50-r57)*k35 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n"// out4 += (r50-r57)*k45 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n"// out5 += (r50-r57)*k55 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n"// out6 += (r50-r57)*k65 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n"// out7 += (r50-r57)*k75 "smlal2 v23.4s, v7.8h, v2.h[7] \n" "ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n"//ktmp "ld2 {v3.8b, v4.8b}, [%11], #16 \n"//r6-r8 "ld2 {v5.8b, v6.8b}, [%11] \n" "ext v7.8b, v3.8b, v5.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k06-k76) "sshll v1.8h, v1.8b, #0 \n"//(k07-k77) "sshll v2.8h, v2.8b, #0 \n"//(k08-k78) "sshll v3.8h, v3.8b, #0 \n"// r6 "sshll v4.8h, v4.8b, #0 \n"// r7 "sshll v7.8h, v7.8b, #0 \n"// r8 // r6 "smlal v8.4s, v3.4h, v0.h[0] \n"// out0 += (r60-r67)*k06 "smlal2 v9.4s, v3.8h, v0.h[0] \n" "smlal v10.4s, v3.4h, v0.h[1] \n"// out1 += (r60-r67)*k16 "smlal2 v11.4s, v3.8h, v0.h[1] \n" "smlal v12.4s, v3.4h, v0.h[2] \n"// out2 += 
(r60-r67)*k26 "smlal2 v13.4s, v3.8h, v0.h[2] \n" "smlal v14.4s, v3.4h, v0.h[3] \n"// out3 += (r60-r67)*k36 "smlal2 v15.4s, v3.8h, v0.h[3] \n" "smlal v16.4s, v3.4h, v0.h[4] \n"// out4 += (r60-r67)*k46 "smlal2 v17.4s, v3.8h, v0.h[4] \n" "smlal v18.4s, v3.4h, v0.h[5] \n"// out5 += (r60-r67)*k56 "smlal2 v19.4s, v3.8h, v0.h[5] \n" "smlal v20.4s, v3.4h, v0.h[6] \n"// out6 += (r60-r67)*k66 "smlal2 v21.4s, v3.8h, v0.h[6] \n" "smlal v22.4s, v3.4h, v0.h[7] \n"// out7 += (r60-r67)*k76 "smlal2 v23.4s, v3.8h, v0.h[7] \n" // r7 "smlal v8.4s, v4.4h, v1.h[0] \n"// out0 += (r70-r77)*k07 "smlal2 v9.4s, v4.8h, v1.h[0] \n" "smlal v10.4s, v4.4h, v1.h[1] \n"// out1 += (r70-r77)*k17 "smlal2 v11.4s, v4.8h, v1.h[1] \n" "smlal v12.4s, v4.4h, v1.h[2] \n"// out2 += (r70-r77)*k27 "smlal2 v13.4s, v4.8h, v1.h[2] \n" "smlal v14.4s, v4.4h, v1.h[3] \n"// out3 += (r70-r77)*k37 "smlal2 v15.4s, v4.8h, v1.h[3] \n" "smlal v16.4s, v4.4h, v1.h[4] \n"// out4 += (r70-r77)*k47 "smlal2 v17.4s, v4.8h, v1.h[4] \n" "smlal v18.4s, v4.4h, v1.h[5] \n"// out5 += (r70-r77)*k57 "smlal2 v19.4s, v4.8h, v1.h[5] \n" "smlal v20.4s, v4.4h, v1.h[6] \n"// out6 += (r70-r77)*k67 "smlal2 v21.4s, v4.8h, v1.h[6] \n" "smlal v22.4s, v4.4h, v1.h[7] \n"// out7 += (r70-r77)*k77 "smlal2 v23.4s, v4.8h, v1.h[7] \n" // r8 "smlal v8.4s, v7.4h, v2.h[0] \n"// out0 += (r80-r87)*k08 "smlal2 v9.4s, v7.8h, v2.h[0] \n" "smlal v10.4s, v7.4h, v2.h[1] \n"// out1 += (r80-r87)*k18 "smlal2 v11.4s, v7.8h, v2.h[1] \n" "smlal v12.4s, v7.4h, v2.h[2] \n"// out2 += (r80-r87)*k28 "smlal2 v13.4s, v7.8h, v2.h[2] \n" "smlal v14.4s, v7.4h, v2.h[3] \n"// out3 += (r80-r87)*k38 "smlal2 v15.4s, v7.8h, v2.h[3] \n" "smlal v16.4s, v7.4h, v2.h[4] \n"// out4 += (r80-r87)*k48 "smlal2 v17.4s, v7.8h, v2.h[4] \n" "smlal v18.4s, v7.4h, v2.h[5] \n"// out5 += (r80-r87)*k58 "smlal2 v19.4s, v7.8h, v2.h[5] \n" "smlal v20.4s, v7.4h, v2.h[6] \n"// out6 += (r80-r87)*k68 "smlal2 v21.4s, v7.8h, v2.h[6] \n" "smlal v22.4s, v7.4h, v2.h[7] \n"// out7 += (r80-r87)*k78 "smlal2 v23.4s, v7.8h, 
v2.h[7] \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" "st1 {v16.4s, v17.4s}, [%5], #32 \n" "st1 {v18.4s, v19.4s}, [%6], #32 \n" "st1 {v20.4s, v21.4s}, [%7], #32 \n" "st1 {v22.4s, v23.4s}, [%8], #32 \n" "subs %w0, %w0, #1 \n" "sub %12, %12, #72 \n"// reset ktmp "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); } #else // __aarch64__ if (nn > 0) { asm volatile( "0: \n" "pld [%1, #128] \n" "vld1.s32 {d16-d17}, [%1] \n"// out0 "pld [%2, #128] \n" "vld1.s32 {d18-d19}, [%2] \n"// out1 "pld [%3, #128] \n" "vld1.s32 {d20-d21}, [%3] \n"// out2 "pld [%4, #128] \n" "vld1.s32 {d22-d23}, [%4] \n"// out3 // r0 "pld [%9, #64] \n" "vld2.s8 {d8-d9}, [%9] \n"// d8(a00 a02 a04 a06 a08 a010 a012 a014), d9(a01 a03 a05 a07 a09 a011 a013 a015) "add %9, #8 \n" "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! 
\n"// d0(k00-k70) d1(k01-k71) d2(k02-k72) "pld [%5, #128] \n" "vld1.s32 {d24-d25}, [%5] \n"// out4 "pld [%6, #128] \n" "vld1.s32 {d26-d27}, [%6] \n"// out5 "vmovl.s8 q2, d2 \n"// q2(k02-k72) "vmovl.s8 q1, d1 \n"// q1(k01-k71) "vmovl.s8 q0, d0 \n"// q0(k00-k70) "vext.s8 d12, d8, d8, #1 \n"// d12(a02 a04 a06 a08 x x x x) "pld [%7, #128] \n" "vld1.s32 {d28-d29}, [%7] \n"// out6 "vmovl.s8 q5, d9 \n"// q5(a01 a03 a05 a07 a09 a011 a013 a015) d11 "vmovl.s8 q4, d8 \n"// q4(a00 a02 a04 a06 a08 a010 a012 a014) d9 "vmovl.s8 q6, d12 \n"// q6(a02 a04 a06 a08 a010 a012 a014 a016) d13 "pld [%8, #128] \n" "vld1.s32 {d30-d31}, [%8] \n"// out7 "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a00 a02 a04 a06) * k00 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a00 a02 a04 a06) * k10 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a00 a02 a04 a06) * k20 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a00 a02 a04 a06) * k30 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a00 a02 a04 a06) * k40 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a00 a02 a04 a06) * k50 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a00 a02 a04 a06) * k60 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a00 a02 a04 a06) * k70 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a01-a07) * k01 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a01-a07) * k11 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a01-a07) * k21 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a01-a07) * k31 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a01-a07) * k41 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a01-a07) * k51 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a01-a07) * k61 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a01-a07) * k71 "pld [%10, #64] \n" "vld2.s8 {d8-d9}, [%10] \n"// d8(a10 a12 a14 a16 a18 a110 a112 a114), d9(a11 a13 a15 a17 a19 a111 a113 a115) "add %10, #8 \n" "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a02-a08) * k02 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a02-a08) * k12 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a02-a08) * k22 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a02-a08) * k32 "pld [%12, #64] \n" "vld1.s8 
{d0-d2}, [%12]! \n"// d0(k03-k73) d1(k04-k74) d2(k05-k75) "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a02-a08) * k42 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a02-a08) * k52 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a02-a08) * k62 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a02-a08) * k72 // r1 "vext.s8 d12, d8, d8, #1 \n"// d12(a12 a14 a16 a18 x x x x) "vmovl.s8 q2, d2 \n"// q2(k05-k75) "vmovl.s8 q1, d1 \n"// q1(k04-k74) "vmovl.s8 q0, d0 \n"// q0(k03-k73) "vmovl.s8 q5, d9 \n"// q5(a11-a115) "vmovl.s8 q4, d8 \n"// q4(a10-a114) "vmovl.s8 q6, d12 \n"// q6(a12-a116) "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a10-a16) * k03 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a10-a16) * k13 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a10-a16) * k23 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a10-a16) * k33 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a10-a16) * k43 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a10-a16) * k53 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a10-a16) * k63 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a10-a16) * k73 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a11-a17) * k04 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a11-a17) * k14 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a11-a17) * k24 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a11-a17) * k34 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a11-a17) * k44 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a11-a17) * k54 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a11-a17) * k64 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a11-a17) * k74 "pld [%11, #64] \n" "vld2.s8 {d8-d9}, [%11] \n"// d8(a20 a22 a24 a26 a28 a210 a212 a214), d9(a21 a23 a25 a27 a29 a211 a213 a215) "add %11, #8 \n" "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a12-a18) * k05 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a12-a18) * k15 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a12-a18) * k25 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a12-a18) * k35 "pld [%12, #64] \n" "vld1.s8 {d0-d2}, [%12]! 
\n"// d0(k06-k76) d1(k07-k77) d2(k08-k78) "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a12-a18) * k45 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a12-a18) * k55 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a12-a18) * k65 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a12-a18) * k75 // r2 "vext.s8 d12, d8, d8, #1 \n"// d12(a22 a24 a26 a28 x x x x) "vmovl.s8 q2, d2 \n"// q2(k08-k78) "vmovl.s8 q1, d1 \n"// q1(k07-k77) "vmovl.s8 q0, d0 \n"// q0(k06-k76) "vmovl.s8 q5, d9 \n"// q5(a21-a215) "vmovl.s8 q4, d8 \n"// q4(a20-a214) "vmovl.s8 q6, d12 \n"// q6(a22-a216) "vmlal.s16 q8, d8, d0[0] \n"// sum0 += (a20-a26) * k06 "vmlal.s16 q9, d8, d0[1] \n"// sum1 += (a20-a26) * k16 "vmlal.s16 q10, d8, d0[2] \n"// sum2 += (a20-a26) * k26 "vmlal.s16 q11, d8, d0[3] \n"// sum3 += (a20-a26) * k36 "vmlal.s16 q12, d8, d1[0] \n"// sum4 += (a20-a26) * k46 "vmlal.s16 q13, d8, d1[1] \n"// sum5 += (a20-a26) * k56 "vmlal.s16 q14, d8, d1[2] \n"// sum6 += (a20-a26) * k66 "vmlal.s16 q15, d8, d1[3] \n"// sum7 += (a20-a26) * k76 "vmlal.s16 q8, d10, d2[0] \n"// sum0 += (a21-a27) * k07 "vmlal.s16 q9, d10, d2[1] \n"// sum1 += (a21-a27) * k17 "vmlal.s16 q10, d10, d2[2] \n"// sum2 += (a21-a27) * k27 "vmlal.s16 q11, d10, d2[3] \n"// sum3 += (a21-a27) * k37 "vmlal.s16 q12, d10, d3[0] \n"// sum4 += (a21-a27) * k47 "vmlal.s16 q13, d10, d3[1] \n"// sum5 += (a21-a27) * k57 "vmlal.s16 q14, d10, d3[2] \n"// sum6 += (a21-a27) * k67 "vmlal.s16 q15, d10, d3[3] \n"// sum7 += (a21-a27) * k77 "vmlal.s16 q8, d12, d4[0] \n"// sum0 += (a22-a28) * k08 "vmlal.s16 q9, d12, d4[1] \n"// sum1 += (a22-a28) * k18 "vmlal.s16 q10, d12, d4[2] \n"// sum2 += (a22-a28) * k28 "vmlal.s16 q11, d12, d4[3] \n"// sum3 += (a22-a28) * k38 "vmlal.s16 q12, d12, d5[0] \n"// sum4 += (a22-a28) * k48 "vmlal.s16 q13, d12, d5[1] \n"// sum5 += (a22-a28) * k58 "vmlal.s16 q14, d12, d5[2] \n"// sum6 += (a22-a28) * k68 "vmlal.s16 q15, d12, d5[3] \n"// sum7 += (a22-a28) * k78 // save s32 to memory "sub %12, %12, #72 \n" "vst1.s32 {d16-d17}, [%1]! 
\n"// out0 "vst1.s32 {d18-d19}, [%2]! \n"// out1 "vst1.s32 {d20-d21}, [%3]! \n"// out2 "vst1.s32 {d22-d23}, [%4]! \n"// out3 "subs %0, #1 \n" "vst1.s32 {d24-d25}, [%5]! \n"// out4 "vst1.s32 {d26-d27}, [%6]! \n"// out5 "vst1.s32 {d28-d29}, [%7]! \n"// out6 "vst1.s32 {d30-d31}, [%8]! \n"// out7 "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { #if __ARM_NEON #if __aarch64__ int8x8_t _r0_s8 = vld1_s8(r0);// (a00 a01 a02 ....) int8x8_t _r1_s8 = vld1_s8(r1);// (a10 a11 a12 ....) int8x8_t _r2_s8 = vld1_s8(r2);// (a20 a21 a22 ....) 
int16x8_t _r0 = vmovl_s8(_r0_s8); int16x8_t _r1 = vmovl_s8(_r1_s8); int16x8_t _r2 = vmovl_s8(_r2_s8); int32x4_t _sum03, _sum47; _sum03 = vld1q_lane_s32(outptr0, _sum03, 0);// out0 _sum03 = vld1q_lane_s32(outptr1, _sum03, 1);// out1 _sum03 = vld1q_lane_s32(outptr2, _sum03, 2);// out2 _sum03 = vld1q_lane_s32(outptr3, _sum03, 3);// out3 _sum47 = vld1q_lane_s32(outptr4, _sum47, 0);// out4 _sum47 = vld1q_lane_s32(outptr5, _sum47, 1);// out5 _sum47 = vld1q_lane_s32(outptr6, _sum47, 2);// out6 _sum47 = vld1q_lane_s32(outptr7, _sum47, 3);// out7 // k0 - k2 int8x8_t _k0_8 = vld1_s8(ktmp); //(k00-k70) int8x8_t _k1_8 = vld1_s8(ktmp+8); //(k01-k71) int8x8_t _k2_8 = vld1_s8(ktmp+16); //(k02-k72) int16x8_t _k0 = vmovl_s8(_k0_8); int16x8_t _k1 = vmovl_s8(_k1_8); int16x8_t _k2 = vmovl_s8(_k2_8); int32x4_t _sum0 = vmull_laneq_s16(vget_low_s16(_k0), _r0, 0); int32x4_t _sum0n = vmull_laneq_s16(vget_high_s16(_k0), _r0, 0); int32x4_t _sum1 = vmull_laneq_s16(vget_low_s16(_k1), _r0, 1); int32x4_t _sum1n = vmull_laneq_s16(vget_high_s16(_k1), _r0, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r0, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r0, 2); // k3 - k5 _k0_8 = vld1_s8(ktmp+24); //(k03-k73) _k1_8 = vld1_s8(ktmp+32); //(k04-k74) _k2_8 = vld1_s8(ktmp+40); //(k05-k75) _k0 = vmovl_s8(_k0_8); _k1 = vmovl_s8(_k1_8); _k2 = vmovl_s8(_k2_8); _sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r1, 0); _sum0n = vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r1, 0); _sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r1, 1); _sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r1, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r1, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r1, 2); // k6 - k8 _k0_8 = vld1_s8(ktmp+48); //(k06-k76) _k1_8 = vld1_s8(ktmp+56); //(k07-k77) _k2_8 = vld1_s8(ktmp+64); //(k08-k78) _k0 = vmovl_s8(_k0_8); _k1 = vmovl_s8(_k1_8); _k2 = vmovl_s8(_k2_8); _sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r2, 0); _sum0n = 
vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r2, 0); _sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r2, 1); _sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r2, 1); _sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r2, 2); _sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r2, 2); _sum0 = vaddq_s32(_sum0, _sum1); _sum0n = vaddq_s32(_sum0n, _sum1n); _sum03 = vaddq_s32(_sum03, _sum0); _sum47 = vaddq_s32(_sum47, _sum0n); vst1q_lane_s32(outptr0, _sum03, 0); vst1q_lane_s32(outptr1, _sum03, 1); vst1q_lane_s32(outptr2, _sum03, 2); vst1q_lane_s32(outptr3, _sum03, 3); vst1q_lane_s32(outptr4, _sum47, 0); vst1q_lane_s32(outptr5, _sum47, 1); vst1q_lane_s32(outptr6, _sum47, 2); vst1q_lane_s32(outptr7, _sum47, 3); outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #else // __aarch64__ asm volatile( "pld [%8, #64] \n" "vld1.s8 {d0}, [%8] \n"// d0(a00 a01 a02 ....) "pld [%9, #64] \n" "vld1.s8 {d2}, [%9] \n"// d2(a10 a11 a12 ....) "pld [%10, #64] \n" "vld1.s8 {d4}, [%10] \n"// d4(a20 a21 a22 ....) "pld [%11, #64] \n" "vld1.s8 {d6-d8}, [%11]! \n"// d6(k00-k70) d7(k01-k71) d8(k02-k72) "vmovl.s8 q0, d0 \n"// d0(a00 a01 a02 x) "vmovl.s8 q1, d2 \n"// d2(a10 a11 a12 x) "vmovl.s8 q2, d4 \n"// d4(a20 a21 a22 x) "vmovl.s8 q5, d8 \n"// d10(k02-k32) d11(k42-k72) "vmovl.s8 q4, d7 \n"// d8(k01-k31) d9(k41-k71) "vmovl.s8 q3, d6 \n"// d6(k00-k30) d7(k40-k70) "vld1.s32 {d20[0]}, [%0] \n"// out0 q10 "vld1.s32 {d20[1]}, [%1] \n"// out1 "vld1.s32 {d21[0]}, [%2] \n"// out2 "vld1.s32 {d21[1]}, [%3] \n"// out3 "pld [%11, #64] \n" "vld1.s8 {d24-d26}, [%11]! 
\n" "vmovl.s8 q14, d26 \n"// d28(k05-k35) d29(k45-k75) "vmovl.s8 q13, d25 \n"// d26(k04-k34) d27(k44-k74) "vmovl.s8 q12, d24 \n"// d24(k03-k33) d25(k43-k73) "vld1.s32 {d22[0]}, [%4] \n"// out4 q11 "vld1.s32 {d22[1]}, [%5] \n"// out5 "vld1.s32 {d23[0]}, [%6] \n"// out6 "vld1.s32 {d23[1]}, [%7] \n"// out7 "vmull.s16 q6, d6, d0[0] \n"// a00 x (k00-k30) "vmull.s16 q7, d7, d0[0] \n"// a00 x (k40-k70) "vmull.s16 q8, d8, d0[1] \n"// a01 x (k01-k31) "vmull.s16 q9, d9, d0[1] \n"// a01 x (k41-k71) "vmlal.s16 q10, d10, d0[2] \n"// a02 x (k02-k32) "vmlal.s16 q11, d11, d0[2] \n"// a02 x (k42-k72) "pld [%11, #64] \n" "vld1.s8 {d6-d8}, [%11]! \n" "vmovl.s8 q5, d8 \n"// d10(k08-k38) d11(k48-k78) "vmovl.s8 q4, d7 \n"// d8(k07-k37) d9(k47-k77) "vmovl.s8 q3, d6 \n"// d6(k06-k36) d7(k46-k76) "vmlal.s16 q6, d24, d2[0] \n"// a10 x (k03-k33) "vmlal.s16 q7, d25, d2[0] \n"// a10 x (k43-k73) "vmlal.s16 q8, d26, d2[1] \n"// a11 x (k04-k34) "vmlal.s16 q9, d27, d2[1] \n"// a11 x (k44-k74) "vmlal.s16 q10, d28, d2[2] \n"// a12 x (k05-k35) "vmlal.s16 q11, d29, d2[2] \n"// a12 x (k45-k75) "vmlal.s16 q6, d6, d4[0] \n"// a20 x (k06-k36) "vmlal.s16 q7, d7, d4[0] \n"// a20 x (k46-k76) "vmlal.s16 q8, d8, d4[1] \n"// a21 x (k07-k37) "vmlal.s16 q9, d9, d4[1] \n"// a21 x (k47-k77) "vmlal.s16 q10, d10, d4[2] \n"// a22 x (k08-k38) "vmlal.s16 q11, d11, d4[2] \n"// a22 x (k48-k78) "vadd.s32 q8, q8, q6 \n" "vadd.s32 q9, q9, q7 \n" "sub %11, %11, #72 \n" "vadd.s32 q10, q10, q8 \n" "vadd.s32 q11, q11, q9 \n" "vst1.s32 {d20[0]}, [%0]! \n"// out0 "vst1.s32 {d20[1]}, [%1]! \n"// out1 "vst1.s32 {d21[0]}, [%2]! \n"// out2 "vst1.s32 {d21[1]}, [%3]! \n"// out3 "vst1.s32 {d22[0]}, [%4]! \n"// out4 "vst1.s32 {d22[1]}, [%5]! \n"// out5 "vst1.s32 {d23[0]}, [%6]! \n"// out6 "vst1.s32 {d23[1]}, [%7]! 
\n"// out7 : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(ktmp) // %11 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(r0), "9"(r1), "10"(r2), "11"(ktmp) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else // __ARM_NEON int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int sum4 = 0; int sum5 = 0; int sum6 = 0; int sum7 = 0; sum0 += (int)r0[0] * ktmp[0]; sum1 += (int)r0[0] * ktmp[1]; sum2 += (int)r0[0] * ktmp[2]; sum3 += (int)r0[0] * ktmp[3]; sum4 += (int)r0[0] * ktmp[4]; sum5 += (int)r0[0] * ktmp[5]; sum6 += (int)r0[0] * ktmp[6]; sum7 += (int)r0[0] * ktmp[7]; ktmp += 8; sum0 += (int)r0[1] * ktmp[0]; sum1 += (int)r0[1] * ktmp[1]; sum2 += (int)r0[1] * ktmp[2]; sum3 += (int)r0[1] * ktmp[3]; sum4 += (int)r0[1] * ktmp[4]; sum5 += (int)r0[1] * ktmp[5]; sum6 += (int)r0[1] * ktmp[6]; sum7 += (int)r0[1] * ktmp[7]; ktmp += 8; sum0 += (int)r0[2] * ktmp[0]; sum1 += (int)r0[2] * ktmp[1]; sum2 += (int)r0[2] * ktmp[2]; sum3 += (int)r0[2] * ktmp[3]; sum4 += (int)r0[2] * ktmp[4]; sum5 += (int)r0[2] * ktmp[5]; sum6 += (int)r0[2] * ktmp[6]; sum7 += (int)r0[2] * ktmp[7]; ktmp += 8; sum0 += (int)r1[0] * ktmp[0]; sum1 += (int)r1[0] * ktmp[1]; sum2 += (int)r1[0] * ktmp[2]; sum3 += (int)r1[0] * ktmp[3]; sum4 += (int)r1[0] * ktmp[4]; sum5 += (int)r1[0] * ktmp[5]; sum6 += (int)r1[0] * ktmp[6]; sum7 += (int)r1[0] * ktmp[7]; ktmp += 8; sum0 += (int)r1[1] * ktmp[0]; sum1 += (int)r1[1] * ktmp[1]; sum2 += (int)r1[1] * ktmp[2]; sum3 += (int)r1[1] * ktmp[3]; sum4 += (int)r1[1] * ktmp[4]; sum5 += (int)r1[1] * ktmp[5]; sum6 += (int)r1[1] * ktmp[6]; sum7 += (int)r1[1] * ktmp[7]; ktmp += 8; sum0 += (int)r1[2] * ktmp[0]; sum1 += (int)r1[2] * ktmp[1]; sum2 += 
(int)r1[2] * ktmp[2]; sum3 += (int)r1[2] * ktmp[3]; sum4 += (int)r1[2] * ktmp[4]; sum5 += (int)r1[2] * ktmp[5]; sum6 += (int)r1[2] * ktmp[6]; sum7 += (int)r1[2] * ktmp[7]; ktmp += 8; sum0 += (int)r2[0] * ktmp[0]; sum1 += (int)r2[0] * ktmp[1]; sum2 += (int)r2[0] * ktmp[2]; sum3 += (int)r2[0] * ktmp[3]; sum4 += (int)r2[0] * ktmp[4]; sum5 += (int)r2[0] * ktmp[5]; sum6 += (int)r2[0] * ktmp[6]; sum7 += (int)r2[0] * ktmp[7]; ktmp += 8; sum0 += (int)r2[1] * ktmp[0]; sum1 += (int)r2[1] * ktmp[1]; sum2 += (int)r2[1] * ktmp[2]; sum3 += (int)r2[1] * ktmp[3]; sum4 += (int)r2[1] * ktmp[4]; sum5 += (int)r2[1] * ktmp[5]; sum6 += (int)r2[1] * ktmp[6]; sum7 += (int)r2[1] * ktmp[7]; ktmp += 8; sum0 += (int)r2[2] * ktmp[0]; sum1 += (int)r2[2] * ktmp[1]; sum2 += (int)r2[2] * ktmp[2]; sum3 += (int)r2[2] * ktmp[3]; sum4 += (int)r2[2] * ktmp[4]; sum5 += (int)r2[2] * ktmp[5]; sum6 += (int)r2[2] * ktmp[6]; sum7 += (int)r2[2] * ktmp[7]; ktmp += 8; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; *outptr6 += sum6; *outptr7 += sum7; ktmp -= 8*9; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 8*9; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out = top_blob.channel(p); out.fill(0); const signed char* ktmp = _kernel.channel(p/8 + p%8); for (int q=0; q<inch; q++) { int* outptr = out; const signed char* img0 = bottom_blob.channel(q); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w*2; int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v0.8b, v1.8b}, [%5] \n"//ktmp "ld2 {v2.8b, v3.8b}, [%2], #16 \n"//r0-r2 "ld2 {v4.8b, v5.8b}, 
[%2] \n" "ld2 {v6.8b, v7.8b}, [%3], #16 \n"//r3-r5 "ld2 {v8.8b, v9.8b}, [%3] \n" "ld2 {v10.8b, v11.8b}, [%4], #16 \n"//r6-r8 "ld2 {v12.8b, v13.8b}, [%4] \n" "ld1 {v14.4s, v15.4s}, [%1] \n"//out0 "ext v4.8b, v2.8b, v4.8b, #1 \n" "ext v8.8b, v6.8b, v8.8b, #1 \n" "ext v12.8b, v10.8b, v12.8b, #1 \n" "sshll v0.8h, v0.8b, #0 \n"//(k0-k7) "sshll v1.8h, v1.8b, #0 \n"//(k8) "sshll v2.8h, v2.8b, #0 \n"// r0 "sshll v3.8h, v3.8b, #0 \n"// r1 "sshll v4.8h, v4.8b, #0 \n"// r2 "sshll v6.8h, v6.8b, #0 \n"// r3 "sshll v7.8h, v7.8b, #0 \n"// r4 "sshll v8.8h, v8.8b, #0 \n"// r5 "sshll v10.8h, v10.8b, #0 \n"// r6 "sshll v11.8h, v11.8b, #0 \n"// r7 "sshll v12.8h, v12.8b, #0 \n"// r8 // r0 "smull v16.4s, v2.4h, v0.h[0] \n"// out = r0*k0 "smull2 v17.4s, v2.8h, v0.h[0] \n" "smull v18.4s, v3.4h, v0.h[1] \n"// outn = r1*k1 "smull2 v19.4s, v3.8h, v0.h[1] \n" "smlal v16.4s, v4.4h, v0.h[2] \n"// out = r2*k2 "smlal2 v17.4s, v4.8h, v0.h[2] \n" "smlal v18.4s, v6.4h, v0.h[3] \n"// outn = r3*k3 "smlal2 v19.4s, v6.8h, v0.h[3] \n" "smlal v16.4s, v7.4h, v0.h[4] \n"// out = r4*k4 "smlal2 v17.4s, v7.8h, v0.h[4] \n" "smlal v18.4s, v8.4h, v0.h[5] \n"// outn = r5*k5 "smlal2 v19.4s, v8.8h, v0.h[5] \n" "smlal v16.4s, v10.4h, v0.h[6] \n"// out = r6*k6 "smlal2 v17.4s, v10.8h, v0.h[6] \n" "smlal v18.4s, v11.4h, v0.h[7] \n"// outn = r7*k7 "smlal2 v19.4s, v11.8h, v0.h[7] \n" "smlal v16.4s, v12.4h, v1.h[0] \n"// out = r8*k8 "smlal2 v17.4s, v12.8h, v1.h[0] \n" "add v8.4s, v16.4s, v18.4s \n" "add v9.4s, v17.4s, v19.4s \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(ktmp) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(ktmp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); } #else if (nn > 0) { asm volatile( "vld1.s8 {d0-d1}, [%5] \n"// d0(k0 - k7) d1(k8 ...) 
"vmovl.s8 q1, d1 \n"// d2(k8 ...) "vmovl.s8 q0, d0 \n"// d0(k0 - k3) d1(k4 - k7) "0: \n" "pld [%2, #192] \n" "vld2.s8 {d4-d5}, [%2]! \n"// r0 d4(a00 a02 ... a014) d5(a01 a03 ... a015) "vld2.s8 {d8-d9}, [%2] \n"// d8(a016 ....) "vld2.s8 {d10-d11}, [%3]! \n"// r1 d10(a10 a12 ... a114) d11(a11 a13 ... a115) "vld2.s8 {d14-d15}, [%3] \n"// d14(a116 ....) "vld2.s8 {d16-d17}, [%4]! \n"// r2 d16(a20 a22 ... a214) d17(a21 a23 ... a215) "vld2.s8 {d20-d21}, [%4] \n"// d20(a216 ....) "vld1.s32 {d22-d25}, [%1] \n"// q11(out0 - out3) q12(out4 - out7) "vext.s8 d8, d4, d8, #1 \n"// d8(a02 a04 ... a016) "vext.s8 d14, d10, d14, #1 \n"// d14(a12 a14 ... a116) "vext.s8 d20, d16, d20, #1 \n"// d20(a22 a24 ... a216) "vmovl.s8 q3, d5 \n"// q3(a01 a03 ... a015) "vmovl.s8 q2, d4 \n"// q2(a00 a02 ... a014) "vmovl.s8 q4, d8 \n"// q4(a02 a04 ... a016) "vmovl.s8 q6, d11 \n"// q6(a11 a13 ... a115) "vmovl.s8 q5, d10 \n"// q5(a10 a12 ... a114) "vmovl.s8 q7, d14 \n"// q7(a12 a14 ... a116) "vmovl.s8 q9, d17 \n"// q9(a21 a23 ... a215) "vmovl.s8 q8, d16 \n"// q8(a20 a22 ... a214) "vmovl.s8 q10, d20 \n"// q10(a22 a24 ... a216) "vmlal.s16 q11, d4, d0[0] \n"// k0 "vmlal.s16 q12, d5, d0[0] \n" "vmull.s16 q13, d6, d0[1] \n"// k1 "vmull.s16 q14, d7, d0[1] \n" "vmlal.s16 q11, d8, d0[2] \n"// k2 "vmlal.s16 q12, d9, d0[2] \n" "vmlal.s16 q13, d12, d1[0] \n"// k4 "vmlal.s16 q14, d13, d1[0] \n" "vmlal.s16 q11, d10, d0[3] \n"// k3 "vmlal.s16 q12, d11, d0[3] \n" "vmlal.s16 q13, d14, d1[1] \n"// k5 "vmlal.s16 q14, d15, d1[1] \n" "vmlal.s16 q11, d16, d1[2] \n"// k6 "vmlal.s16 q12, d17, d1[2] \n" "vmlal.s16 q13, d18, d1[3] \n"// k7 "vmlal.s16 q14, d19, d1[3] \n" "vmlal.s16 q11, d20, d2[0] \n"// k8 "vmlal.s16 q12, d21, d2[0] \n" "vadd.s32 q11, q11, q13 \n" "vadd.s32 q12, q12, q14 \n" "vst1.32 {d22-d25}, [%1]! 
\n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(ktmp) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(ktmp) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON if (remain > 0) { #if __ARM_NEON int8x8_t _k01234567s8 = vld1_s8(ktmp); int8x8_t _k8xxxxxxxs8 = vld1_s8(ktmp+8); int8x8_t _k34567xxxs8 = vext_s8(_k01234567s8, _k01234567s8, 3); int8x8_t _k678xxxxxs8 = vext_s8(_k01234567s8, _k8xxxxxxxs8, 6); int16x8_t _k0123_s16 = vmovl_s8(_k01234567s8); int16x8_t _k3456_s16 = vmovl_s8(_k34567xxxs8); int16x8_t _k678x_s16 = vmovl_s8(_k678xxxxxs8); #endif for (; remain>0; remain--) { #if __ARM_NEON int8x8_t _r00s8 = vld1_s8(r0); int8x8_t _r10s8 = vld1_s8(r1); int8x8_t _r20s8 = vld1_s8(r2); int16x8_t _r00s16 = vmovl_s8(_r00s8); int16x8_t _r10s16 = vmovl_s8(_r10s8); int16x8_t _r20s16 = vmovl_s8(_r20s8); int32x4_t _sum = vmull_s16(vget_low_s16(_r00s16), vget_low_s16(_k0123_s16)); _sum = vmlal_s16(_sum, vget_low_s16(_r10s16), vget_low_s16(_k3456_s16)); _sum = vmlal_s16(_sum, vget_low_s16(_r20s16), vget_low_s16(_k678x_s16)); _sum = vsetq_lane_s32(*outptr, _sum, 3); #if __aarch64__ *outptr = vaddvq_s32(_sum); #else int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum)); _ss = vpadd_s32(_ss, _ss); *outptr = vget_lane_s32(_ss, 0); #endif // __aarch64__ #else int sum = 0; sum += (int)r0[0] * ktmp[0]; sum += (int)r0[1] * ktmp[1]; sum += (int)r0[2] * ktmp[2]; sum += (int)r1[0] * ktmp[3]; sum += (int)r1[1] * ktmp[4]; sum += (int)r1[2] * ktmp[5]; sum += (int)r2[0] * ktmp[6]; sum += (int)r2[1] * ktmp[7]; sum += (int)r2[2] * ktmp[8]; *outptr += sum; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; outptr++; } } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 9; } } } static void conv3x3s1_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& 
opt) // (continuation of the conv3x3s1_int8_neon signature begun on the previous line)
{
    // 3x3 stride-1 int8 convolution: no dedicated direct kernel here --
    // delegate to the generic im2col + sgemm int8 path with fixed 3x3/s1 geometry.
    int kernel_w = 3;
    int kernel_h = 3;
    int stride_w = 1;
    int stride_h = 1;

    conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, opt);
}

/* 3x3 stride-2 int8 convolution: thin wrapper that forwards to the generic
   im2col + sgemm int8 implementation with fixed 3x3/s2 geometry. */
static void conv3x3s2_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    int kernel_w = 3;
    int kernel_h = 3;
    int stride_w = 2;
    int stride_h = 2;

    conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, opt);
}
/* psicov21.c */
/* PSICOV - Protein Sparse Inverse COVariance analysis program */

/* by David T. Jones August 2011 - Copyright (C) 2011 University College London */

/* This code is licensed under the terms of GNU General Public License v2 or later */

/* Version 2.1beta3 - Last Edit 27/4/14 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <ctype.h>
#include <math.h>
#include <unistd.h>

#ifdef _OPENMP
#include <omp.h>
#endif

#define FALSE 0
#define TRUE 1

#define SQR(x) ((x)*(x))
#define MAX(x,y) ((x)>(y)?(x):(y))
#define MIN(x,y) ((x)<(y)?(x):(y))

/* Maximum supported alignment width (residues per sequence, incl. newline slack) */
#define MAXSEQLEN 5000

/* Minimum effective number of sequences (expands to the current seqlen variable) */
#define MINEFSEQS (seqlen)

/* Dump a rude message to standard error and exit */
void fail(char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt) ;
    fprintf(stderr, "*** ");
    vfprintf(stderr, fmt, ap);
    fputc('\n', stderr);

    exit(-1);
}

/* Convert AA letter to numeric code (0-21); any non-letter maps to 20 */
int aanum(int ch)
{
    /* Table indexed by the letter's low 5 bits ('A'..'Z' and 'a'..'z' -> 1..26);
       slot 0 (999) is padding and is never hit for alphabetic input */
    const static int aacvs[] =
    {
        999, 0, 3, 4, 3, 6, 13, 7, 8, 9, 21, 11, 10, 12, 2,
        21, 14, 5, 1, 15, 16, 21, 19, 17, 21, 18, 6
    };

    return (isalpha(ch) ? aacvs[ch & 31] : 20);
}

/* Allocate matrix of rows x columns elements of 'size' bytes each.
   Layout: an int row count is stored immediately before the row-pointer
   array, and the pointer returned is to the row-pointer array itself.
   NOTE(review): "rp + sizeof(int)" performs arithmetic on a void* (a GCC
   extension) and may leave the row-pointer array misaligned on platforms
   where alignof(void *) > sizeof(int) -- confirm before porting. */
void *allocmat(int rows, int columns, int size)
{
    int i;
    void **p, *rp;

    rp = malloc(rows * sizeof(void *) + sizeof(int));

    if (rp == NULL)
        fail("allocmat: malloc [] failed!");

    *((int *)rp) = rows; /* stash the row count ahead of the pointer array */

    p = rp + sizeof(int);

    for (i = 0; i < rows; i++)
        if ((p[i] = calloc(columns, size)) == NULL)
            fail("allocmat: malloc [][] failed!");

    return p;
}

/* Allocate a zero-initialised vector of 'columns' elements of 'size' bytes */
void *allocvec(int columns, int size)
{
    void *p;

    p = calloc(columns, size);

    if (p == NULL)
        fail("allocvec: calloc failed!");

    return p;
}

/* This subroutine computes the L1 regularized covariance matrix estimate
   using the algorithm described in the paper:

   J. Friedman, T. Hastie, R. Tibshirani:
   Sparse inverse covariance estimation with the graphical lasso
   Biostatistics, 9(3):432-441, July 2008.

   This code is adapted from the Fortran code described in the following
   report:

   M. A. Sustik & B.
Calderhead: GLASSOFAST: An efficient GLASSO implementation
   Technical Report TR-12-29, University of Texas at Austin

   NOTE: that when multiple threads are used, we gain a huge time saving by
   avoiding full thread synchronisation when updating elements of the W
   (covariance) matrix. In multithreaded mode, the order of updates to the W
   matrix at each iteration will depend on the order in which threads
   complete. In practice, this hardly matters, because the algorithm is
   iterative, and in testing still converges to within 6 d.p. of the
   non-threaded code. If a very small degree of non-deterministic behaviour
   really worries you, then set the maximum number of threads to 1 (or
   compile without OpenMP).
*/

#define EPS (1.1e-15)
#define BIG (1e9)

/* Graphical-lasso solver.
   n       : problem dimension (n x n matrices)
   S       : empirical covariance matrix (input)
   L       : per-element L1 regularisation (rho) matrix; entries >= BIG are
             treated as "frozen at zero" and skipped
   thr     : convergence threshold (scaled by mean absolute off-diagonal of S)
   maxit   : maximum number of outer iterations
   approxflg: if nonzero, skip coordinate updates with |delta| <= 1e-6
   warm    : if nonzero, reuse X as a warm start instead of reinitialising W/X
   X       : output precision (inverse covariance) estimate
   W       : output regularised covariance estimate
   Returns 0 if S is diagonal, otherwise the number of iterations performed.
   NOTE(review): wd[] and wxj[] are MAXSEQLEN*21 doubles each (~840 KB apiece)
   on the stack, and wxj is additionally OpenMP-private per thread -- large
   stack/thread-stack requirement; confirm thread stack sizes are adequate. */
int glassofast(const int n, double **S, double **L, const double thr, const int maxit, int approxflg, int warm, double **X, double **W)
{
    int i, j, ii, iter, jj;
    double a, b, c, delta, dlx, dw, shr, sum, thrlasso, tmp, wd[MAXSEQLEN*21], wxj[MAXSEQLEN*21];

    /* shr = sum of absolute off-diagonal entries of S */
    for (shr=ii=0; ii<n; ii++)
        for (jj=0; jj<n; jj++)
            shr += fabs(S[ii][jj]);

    for (i=0; i<n; i++)
        shr -= fabs(S[i][i]);

    if (shr == 0.0)
    {
        /* S is diagonal.
 */
        for (ii=0; ii<n; ii++)
            for (jj=0; jj<n; jj++)
                W[ii][jj] = X[ii][jj] = 0.0;

        for (i=0; i<n; i++)
            W[i][i] = W[i][i] + L[i][i];

        for (ii=0; ii<n; ii++)
            for (jj=0; jj<n; jj++)
                X[ii][jj] = 0.0;

        for (i=0; i<n; i++)
            X[i][i] = 1.0 / MAX(W[i][i], EPS);

        return 0;
    }

    /* Convert thr into absolute outer/inner convergence tolerances */
    shr *= thr/(n-1);
    thrlasso = shr/n;
    if (thrlasso < 2*EPS)
        thrlasso = 2*EPS;

    if (!warm)
    {
        /* Cold start: W = S, X = 0 */
        for (ii=0; ii<n; ii++)
            for (jj=0; jj<n; jj++)
            {
                W[ii][jj] = S[ii][jj];
                X[ii][jj] = 0.0;
            }
    }
    else
    {
        /* Warm start: convert the previous precision matrix rows into
           the internal scaled representation used by the solver */
        for (i=0; i<n; i++)
        {
            for (ii=0; ii<n; ii++)
                X[i][ii] = -X[i][ii]/X[i][i];
            X[i][i] = 0.0;
        }
    }

    /* Regularised diagonal is fixed for the whole run */
    for (i=0; i<n; i++)
    {
        wd[i] = S[i][i] + L[i][i];
        W[i][i] = wd[i];
    }

    for (iter = 1; iter<=maxit; iter++)
    {
        dw = 0.0;

        /* One block-coordinate pass: each thread solves a lasso problem for
           one column j.  W rows/columns are updated without full
           synchronisation (see NOTE in the header comment above). */
#pragma omp parallel for default(shared) private(i,j,ii,wxj,a,b,c,dlx,delta,sum)
        for (j=0; j<n; j++)
        {
            /* wxj = W * X[j] restricted to nonzero coefficients */
            for (ii=0; ii<n; ii++)
                wxj[ii] = 0.0;

            for (i=0; i<n; i++)
                if (X[j][i] != 0.0)
                    for (ii=0; ii<n; ii++)
                        wxj[ii] += W[i][ii] * X[j][i];

            /* Inner coordinate-descent loop with soft-thresholding */
            for (;;)
            {
                dlx = 0.0;

                for (i=0; i<n; i++)
                {
                    if (i != j && L[j][i] < BIG)
                    {
                        a = S[j][i] - wxj[i] + wd[i] * X[j][i];
                        b = fabs(a) - L[j][i];
                        if (b <= 0.0)
                            c = 0.0;          /* soft-threshold to zero */
                        else if (a >= 0.0)
                            c = b / wd[i];
                        else
                            c = -b / wd[i];

                        delta = c - X[j][i];
                        /* approxflg: ignore tiny updates to speed convergence */
                        if (delta != 0.0 && (!approxflg || fabs(delta) > 1e-6))
                        {
                            X[j][i] = c;

                            for (ii=0; ii<n; ii++)
                                wxj[ii] += W[i][ii] * delta;

                            if (fabs(delta) > dlx)
                                dlx = fabs(delta);
                        }
                    }
                }

                if (dlx < thrlasso)
                    break;
            }

            wxj[j] = wd[j];

            /* L1 change of column j of W, folded into the global max dw */
            for (sum=ii=0; ii<n; ii++)
                sum += fabs(wxj[ii] - W[j][ii]);

            /* Only the dw reduction is serialised; the W writes below
               intentionally race between threads (see header NOTE) */
#pragma omp critical
            if (sum > dw)
                dw = sum;

            for (ii=0; ii<n; ii++)
                W[j][ii] = wxj[ii];
            for (ii=0; ii<n; ii++)
                W[ii][j] = wxj[ii];
        }

        if (dw <= shr)
            break;
    }

    /* Recover the precision matrix X from the internal representation */
    for (i=0; i<n; i++)
    {
        for (sum=ii=0; ii<n; ii++)
            sum += X[i][ii] * W[i][ii];

        tmp = 1.0 / (wd[i] - sum);
        for (ii=0; ii<n; ii++)
            X[i][ii] = -tmp * X[i][ii];
        X[i][i] = tmp;
    }

    /* Symmetrise X (threaded updates can leave tiny asymmetries) */
    for (i=0; i<n-1; i++)
    {
        for (ii=i+1; ii<n; ii++)
        {
            X[i][ii] = 0.5 * (X[i][ii] + X[ii][i]);
            X[ii][i] = X[i][ii];
        }
    }

    return iter;
}

/* Perform Cholesky decomposition on matrix */
int test_cholesky(double **a, const int n)
{
    int i, j, k, status=0;
    double sum;
    static
double *diag; if (diag == NULL) diag = allocvec(n, sizeof(double)); for (i=0; i<n; i++) { if (!status) for (j=i; j<n; j++) { sum = a[i][j]; for (k=i-1; k >= 0; k--) sum -= a[i][k]*a[j][k]; if (i == j) { if (sum <= 0.0) status = 1; diag[i] = sqrt(sum); } else a[j][i] = sum / diag[i]; } } return status; } struct sc_entry { double sc; int i, j; } *sclist; /* Sort descending */ int cmpfn(const void *a, const void *b) { if (((struct sc_entry *)a)->sc == ((struct sc_entry *)b)->sc) return 0; if (((struct sc_entry *)a)->sc < ((struct sc_entry *)b)->sc) return 1; return -1; } int main(int argc, char **argv) { int a, b, i, j, k, seqlen, nids, s, nseqs, ncon, opt, ndim, filtflg=0, approxflg=0, initflg=0, apcflg=1, maxit=10000, npair, nnzero, niter, jerr, shrinkflg=1, rawscflg = 1, pseudoc = 1, minseqsep = 5, overrideflg=0; unsigned int *wtcount, ccount[MAXSEQLEN]; double thresh=1e-4, del, **pcmat, *pcsum, pcmean, pc, trialrho, rhodefault = -1.0; double sum, score, **pa, wtsum, lambda, smean, fnzero, lastfnzero, rfact, r2, targfnzero = 0.0, scsum, scsumsq, mean, sd, zscore, ppv; double *weight, idthresh = -1.0, maxgapf = 0.9; char buf[4096], seq[MAXSEQLEN], *blockfn = NULL, **aln; FILE *ifp; while ((opt = getopt(argc, argv, "aflnopr:b:i:t:c:g:d:j:z:")) >= 0) switch (opt) { case 'a': approxflg = 1; break; case 'n': shrinkflg = 0; break; case 'o': overrideflg = 1; break; case 'p': rawscflg = 0; break; case 'f': filtflg = 1; break; case 'l': apcflg = 0; break; case 'r': rhodefault = atof(optarg); break; case 'd': targfnzero = atof(optarg); if (targfnzero < 5e-5 || targfnzero >= 1.0) fail("Target density value must be in range 5e-5 >= d < 1!"); break; case 't': thresh = atof(optarg); break; case 'i': idthresh = 1.0 - atof(optarg)/100.0; break; case 'c': pseudoc = atoi(optarg); break; case 'j': minseqsep = atoi(optarg); break; case 'b': blockfn = strdup(optarg); break; case 'g': maxgapf = atof(optarg); break; case 'z': #ifdef _OPENMP omp_set_num_threads(atoi(optarg)); #endif 
break; case '?': exit(-1); } if (optind >= argc) fail("Usage: psicov [options] alnfile\n\nOptions:\n-a\t: use approximate Lasso algorithm\n-n\t: don't pre-shrink the sample covariance matrix\n-f\t: filter low-scoring contacts\n-p\t: output PPV estimates rather than raw scores\n-l\t: don't apply APC to Lasso output\n-r nnn\t: set initial rho parameter\n-d nnn\t: set target precision matrix sparsity (default 0 = not specified)\n-t nnn\t: set Lasso convergence threshold (default 1e-4)\n-i nnn\t: select BLOSUM-like weighting with given identity threshold (default selects threshold automatically)\n-c nnn\t: set pseudocount value (default 1)\n-j nnn\t: set minimum sequence separation (default 5)\n-g nnn\t: maximum fraction of gaps (default 0.9)\n-z nnn\t: set maximum no. of threads\n-b file\t: read rho parameter file\n"); ifp = fopen(argv[optind], "r"); if (!ifp) fail("Unable to open alignment file!"); for (nseqs=0;; nseqs++) if (!fgets(seq, MAXSEQLEN, ifp)) break; aln = allocvec(nseqs, sizeof(char *)); weight = allocvec(nseqs, sizeof(double)); wtcount = allocvec(nseqs, sizeof(unsigned int)); rewind(ifp); if (!fgets(seq, MAXSEQLEN, ifp)) fail("Bad alignment file!"); seqlen = strlen(seq)-1; if (!(aln[0] = malloc(seqlen))) fail("Out of memory!"); for (j=0; j<seqlen; j++) aln[0][j] = aanum(seq[j]); for (i=1; i<nseqs; i++) { if (!fgets(seq, MAXSEQLEN, ifp)) break; if (seqlen != strlen(seq)-1) fail("Length mismatch in alignment file!"); if (!(aln[i] = malloc(seqlen))) fail("Out of memory!"); for (j=0; j<seqlen; j++) aln[i][j] = aanum(seq[j]); } /* Calculate sequence weights (use openMP/pthreads if available) */ if (idthresh < 0.0) { double meanfracid = 0.0; #pragma omp parallel for default(shared) private(j,k) reduction(+:meanfracid) for (i=0; i<nseqs; i++) for (j=i+1; j<nseqs; j++) { int nids; double fracid; for (nids=k=0; k<seqlen; k++) nids += (aln[i][k] == aln[j][k]); fracid = (double)nids / seqlen; meanfracid += fracid; } meanfracid /= 0.5 * nseqs * (nseqs - 1.0); 
idthresh = MIN(0.6, 0.38 * 0.32 / meanfracid); // printf("idthresh = %f meanfracid = %f\n", idthresh, meanfracid); } #pragma omp parallel for default(shared) private(j,k) for (i=0; i<nseqs; i++) for (j=i+1; j<nseqs; j++) { int nthresh = seqlen * idthresh; for (k=0; nthresh > 0 && k<seqlen; k++) nthresh -= (aln[i][k] != aln[j][k]); if (nthresh > 0) { #pragma omp critical wtcount[i]++; wtcount[j]++; } } for (wtsum=i=0; i<nseqs; i++) wtsum += (weight[i] = 1.0 / (1 + wtcount[i])); // printf("wtsum = %f\n", wtsum); if (wtsum < MINEFSEQS && !overrideflg) fail("Sorry - not enough sequences or sequence diversity to proceed!\nNeff (%f) < MINEFSEQS (%d)\nIf you want to force a calculation at your own risk, adjust MINEFSEQS or use -o to override.\n", wtsum, MINEFSEQS); pa = allocmat(seqlen, 21, sizeof(double)); /* Calculate singlet frequencies with pseudocount */ for (i=0; i<seqlen; i++) { for (a=0; a<21; a++) pa[i][a] = pseudoc; for (k=0; k<nseqs; k++) { a = aln[k][i]; if (a < 21) pa[i][a] += weight[k]; } for (a=0; a<21; a++) pa[i][a] /= pseudoc * 21.0 + wtsum; } double **cmat, **rho, **ww, **wwi, **tempmat; ndim = seqlen * 21; cmat = allocmat(ndim, ndim, sizeof(double)); tempmat = allocmat(ndim, ndim, sizeof(double)); /* Form the covariance matrix */ #pragma omp parallel for default(shared) private(j,k,a,b) for (i=0; i<seqlen; i++) for (j=i; j<seqlen; j++) { double pab[21][21]; for (a=0; a<21; a++) for (b=0; b<21; b++) if (i == j) pab[a][b] = (a == b) ? 
pa[i][a] : 0.0; else pab[a][b] = pseudoc / 21.0; if (i != j) { for (k=0; k<nseqs; k++) { a = aln[k][i]; b = aln[k][j]; if (a < 21 && b < 21) pab[a][b] += weight[k]; } for (a=0; a<21; a++) for (b=0; b<21; b++) pab[a][b] /= pseudoc * 21.0 + wtsum; } for (a=0; a<21; a++) for (b=0; b<21; b++) if (i != j || a == b) cmat[i*21+a][j*21+b] = cmat[j*21+b][i*21+a] = pab[a][b] - pa[i][a] * pa[j][b]; } /* Shrink sample covariance matrix towards shrinkage target F = Diag(1,1,1,...,1) * smean */ if (shrinkflg) { for (smean=i=0; i<ndim; i++) smean += cmat[i][i]; smean /= (double)ndim; lambda = 0.2; for (;;) { for (i=0; i<ndim; i++) memcpy(tempmat[i], cmat[i], ndim*sizeof(double)); /* Test if positive definite using Cholesky decomposition */ if (!test_cholesky(tempmat, ndim)) break; #pragma omp parallel for default(shared) private(j,a,b) for (i=0; i<seqlen; i++) for (j=0; j<seqlen; j++) for (a=0; a<21; a++) for (b=0; b<21; b++) if (i != j) cmat[i*21+a][j*21+b] *= 1.0 - lambda; else if (a == b) cmat[i*21+a][j*21+b] = smean * lambda + (1.0 - lambda) * cmat[i*21+a][j*21+b]; } } rho = allocmat(ndim, ndim, sizeof(double)); ww = allocmat(ndim, ndim, sizeof(double)); wwi = allocmat(ndim, ndim, sizeof(double)); lastfnzero=0.0; /* Guess at a reasonable starting rho value if undefined */ if (rhodefault < 0.0) trialrho = MAX(0.001, 1.0 / wtsum); else trialrho = rhodefault; rfact = 0.0; for (;;) { double targdiff, besttd = BIG, bestrho; if (trialrho <= 0.0 || trialrho >= 1.0) { /* Give up search - recalculate with best rho found so far and exit */ trialrho = bestrho; targfnzero = 0.0; } for (i=0; i<ndim; i++) for (j=0; j<ndim; j++) rho[i][j] = trialrho; for (i=0; i<seqlen; i++) for (j=0; j<seqlen; j++) for (a=0; a<21; a++) for (b=0; b<21; b++) if ((a != b && i == j) || pa[i][20] > maxgapf || pa[j][20] > maxgapf) rho[i*21+a][j*21+b] = BIG; /* Mask out regions if block-out list provided */ if (blockfn != NULL) { ifp = fopen(blockfn, "r"); for (;;) { if (fscanf(ifp, "%d %d %lf", &i, &j, &score) 
!= 3) break; for (a=0; a<21; a++) for (b=0; b<21; b++) { rho[(i-1)*21+a][(j-1)*21+b] = score; rho[(j-1)*21+b][(i-1)*21+a] = score; } } fclose(ifp); } glassofast(ndim, cmat, rho, thresh, maxit, approxflg, initflg, wwi, ww); /* Don't attempt interation if too few sequences */ if (targfnzero <= 0.0 || wtsum < seqlen) break; for (npair=nnzero=i=0; i<ndim; i++) for (j=i+1; j<ndim; j++,npair++) if (wwi[i][j] != 0.0) nnzero++; fnzero = (double) nnzero / npair; // printf("rho=%f fnzero = %f\n", trialrho, fnzero); /* Stop iterating if we have achieved the target sparsity level */ targdiff = fabs(fnzero - targfnzero)/targfnzero; if (targdiff < 0.01) break; if (targdiff < besttd) { besttd = targdiff; bestrho = trialrho; } if (fnzero == 0.0) { /* As we have guessed far too high, halve rho and try again */ trialrho *= 0.5; continue; } if (lastfnzero > 0.0 && fnzero != lastfnzero) { // printf("fnzero=%f lastfnzero=%f trialrho=%f oldtrialrho=%f\n", fnzero, lastfnzero, trialrho, trialrho/rfact); rfact = pow(rfact, log(targfnzero / fnzero) / log(fnzero / lastfnzero)); // printf("New rfact = %f\n", rfact); } lastfnzero = fnzero; /* Make a small trial step in the appropriate direction */ if (rfact == 0.0) rfact = (fnzero < targfnzero) ? 
0.9 : 1.1; trialrho *= rfact; } /* Calculate background corrected scores using average product correction */ pcmat = allocmat(seqlen, seqlen, sizeof(double)); pcsum = allocvec(seqlen, sizeof(double)); pcmean = 0.0; for (i=0; i<seqlen; i++) for (j=i+1; j<seqlen; j++) { for (pc=a=0; a<20; a++) for (b=0; b<20; b++) pc += fabs(wwi[i*21+a][j*21+b]); pcmat[i][j] = pcmat[j][i] = pc; pcsum[i] += pc; pcsum[j] += pc; pcmean += pc; } pcmean /= seqlen * (seqlen - 1) * 0.5; /* Build final list of predicted contacts */ sclist = allocvec(seqlen * (seqlen - 1) / 2, sizeof(struct sc_entry)); for (scsum=scsumsq=ncon=i=0; i<seqlen; i++) for (j=i+minseqsep; j<seqlen; j++) if (pcmat[i][j] > 0.0) { /* Calculate APC score */ if (apcflg) sclist[ncon].sc = pcmat[i][j] - pcsum[i] * pcsum[j] / SQR(seqlen - 1.0) / pcmean; else sclist[ncon].sc = pcmat[i][j]; scsum += sclist[ncon].sc; scsumsq += SQR(sclist[ncon].sc); sclist[ncon].i = i; sclist[ncon++].j = j; } qsort(sclist, ncon, sizeof(struct sc_entry), cmpfn); mean = scsum / ncon; sd = 1.25 * sqrt(scsumsq / ncon - SQR(mean)); /* Corrected for extreme-value bias */ for (i=0; i<seqlen; i++) ccount[i] = 0; /* Print output in CASP RR format with optional PPV estimated from final Z-score */ if (rawscflg) for (i=0; i<ncon; i++) printf("%d %d 0 8 %f\n", sclist[i].i+1, sclist[i].j+1, sclist[i].sc); else for (i=0; i<ncon; i++) { zscore = (sclist[i].sc - mean) / sd; ppv = 0.904 / (1.0 + 16.61 * exp(-0.8105 * zscore)); if (ppv >= 0.5 || (!ccount[sclist[i].i] || !ccount[sclist[i].j]) || !filtflg) { printf("%d %d 0 8 %f\n", sclist[i].i+1, sclist[i].j+1, ppv); ccount[sclist[i].i]++; ccount[sclist[i].j]++; } } return 0; }
snap_red.c
#include <stdio.h>

#define N 10

/* Regression test for nested OpenMP offload reductions (SNAP-style kernel):
 * every (cell, group, moment) combination computes sum(0..N-1) via an inner
 * parallel reduction and atomically accumulates it into the global total aa.
 * Returns 0 when aa matches the analytically expected total, 1 otherwise.
 * (Fix: removed the unused local 'res'; renamed the local 'exp' to
 * 'expected' so it no longer collides with the standard exp() identifier.)
 */
int main(void)
{
    long int aa = 0;
    int ng = 12;      /* number of groups */
    int cmom = 14;    /* number of moments (cmom-1 used per group) */
    int nxyz = 5000;  /* number of cells */
    // fails for 149 and above: nxyz=149;
    // for testing: nxyz = 10;

#pragma omp target teams distribute num_teams(nxyz) thread_limit(ng*(cmom-1)) map(tofrom:aa)
    for (int gid = 0; gid < nxyz; gid++) {
#pragma omp parallel for collapse(2)
        for (unsigned int g = 0; g < ng; g++) {
            for (unsigned int l = 0; l < cmom - 1; l++) {
                int a = 0;

                /* Inner reduction: a = 0+1+...+(N-1) = N*(N-1)/2. */
#pragma omp parallel for reduction(+:a)
                for (int i = 0; i < N; i++) {
                    a += i;
                }

                /* Atomic because many threads/teams update aa concurrently. */
#pragma omp atomic
                aa += a;
            }
        }
    }

    /* Each of the ng*(cmom-1)*nxyz iterations contributes N*(N-1)/2. */
    long expected = (long)ng * (cmom - 1) * nxyz * (N * (N - 1) / 2);
    printf("The result is = %ld exp:%ld!\n", aa, expected);

    if (aa != expected) {
        printf("Failed %ld\n", aa);
        return 1;
    }
    return 0;
}
matmult_omp_bagoftasks.c
/* Matrix multiplication example OpenMP version, bag of tasks Jim Teresco, CS 338, Williams College, CS 341, Mount Holyoke College Sun Feb 23 18:54:41 EST 2003 Updated for CSIS-335, Siena College, Fall 2021 */ /* header files needed for printf, gettimeofday, struct timeval */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <omp.h> /* header file for our own timer.c function diffgettime */ #include "timer.h" /* we will multiply square matrices, how big? */ #define SIZE 1500 /* our matrices */ double a[SIZE][SIZE], b[SIZE][SIZE], c[SIZE][SIZE]; /* function to compute the result of row row in c */ void do_row(int row) { int col, k; for (col=0; col<SIZE; col++) { /* initialize entry */ c[row][col] = 0; /* perform dot product */ for(k=0; k<SIZE; k++) { c[row][col] = c[row][col] + a[row][k]*b[k][col]; } } } /* it's a simple program for now, we'll just put everything in main */ int main(int argc, char *argv[]) { /* counters */ int i, j, k; double sum; /* to pass to gettimeofday to get wall clock times */ struct timeval start, stop; /* our bag of tasks - each row is a task */ int next_avail_task = 0; int current_task; /* initialize and allocate matrices, just fill with junk */ gettimeofday(&start, NULL); for (i=0; i<SIZE; i++) { for (j=0; j<SIZE; j++) { a[i][j] = i+j; b[i][j] = i-j; } } gettimeofday(&stop, NULL); printf("Initialization took: %f seconds\n", diffgettime(start,stop)); gettimeofday(&start, NULL); /* matrix-matrix multiply */ #pragma omp parallel private(current_task) shared(next_avail_task) { /* mutual exclution on next_avail_task */ #pragma omp critical(mutex) current_task = next_avail_task++; /* process rows from the bag of tasks */ while (current_task < SIZE) { do_row(current_task); /* mutual exclusion on next_avail_task */ #pragma omp critical(mutex) current_task = next_avail_task++; } } /* end of parallel block */ /* there is an implied barrier here -- the master thread cannot continue until it and all other threads have completed 
the parallel block. */ gettimeofday(&stop, NULL); printf("Multiplication took: %f seconds\n", diffgettime(start,stop)); /* This is here to make sure the optimizing compiler doesn't get any big ideas about "optimizing" code away completely */ sum=0; for (i=0; i<SIZE; i++) { for (j=0; j<SIZE; j++) { sum += c[i][j]; } } printf("Sum of elements of c=%f\n", sum); return 0; }
semiring.h
#ifndef __SEMIRING_H__
#define __SEMIRING_H__

#include "functions.h"
#include "../sparse_formats/csr.h"
#include "../sparse_formats/ccsr.h"
#include "../redistribution/nosym_transp.h"
#include <iostream>
using namespace std;

namespace CTF_int {

  /** \brief default elementwise multiplication: a*b */
  template <typename dtype>
  dtype default_mul(dtype a, dtype b){
    return a*b;
  }

  /** \brief default elementwise vector product: c[i] = a[i]*b[i] */
  template <typename dtype>
  void default_vec_mul(dtype const * a, dtype const * b, dtype * c, int64_t n){
    for (int64_t i=0; i<n; i++){
      c[i] = a[i]*b[i];
    }
  }

  /** \brief default strided axpy: Y[incY*i] += alpha*X[incX*i]
   *  (specialized below for BLAS-supported types) */
  template <typename dtype>
  void default_axpy(int n, dtype alpha, dtype const * X, int incX, dtype * Y, int incY){
    for (int i=0; i<n; i++){
      Y[incY*i] += alpha*X[incX*i];
    }
  }

  template <> void default_axpy<float> (int,float,float const *,int,float *,int);
  template <> void default_axpy<double> (int,double,double const *,int,double *,int);
  template <> void default_axpy< std::complex<float> > (int,std::complex<float>,std::complex<float> const *,int,std::complex<float> *,int);
  template <> void default_axpy< std::complex<double> > (int,std::complex<double>,std::complex<double> const *,int,std::complex<double> *,int);

  /** \brief default strided scale: X[incX*i] *= alpha
   *  (specialized below for BLAS-supported types) */
  template <typename dtype>
  void default_scal(int n, dtype alpha, dtype * X, int incX){
    for (int i=0; i<n; i++){
      X[incX*i] *= alpha;
    }
  }

  template <> void default_scal<float>(int n, float alpha, float * X, int incX);
  template <> void default_scal<double>(int n, double alpha, double * X, int incX);
  template <> void default_scal< std::complex<float> > (int n, std::complex<float> alpha, std::complex<float> * X, int incX);
  template <> void default_scal< std::complex<double> > (int n, std::complex<double> alpha, std::complex<double> * X, int incX);

  /** \brief reference column-major GEMM: C = alpha*op(A)*op(B) + beta*C,
   *  where op is transpose when tA/tB is not 'N'/'n' */
  template<typename dtype>
  void default_gemm(char          tA,
                    char          tB,
                    int           m,
                    int           n,
                    int           k,
                    dtype         alpha,
                    dtype const * A,
                    dtype const * B,
                    dtype         beta,
                    dtype *       C){
    int i,j,l;
    int istride_A, lstride_A, jstride_B, lstride_B;
    //TAU_FSTART(default_gemm);
    /* strides select between normal and transposed indexing of A and B */
    if (tA == 'N' || tA == 'n'){
      istride_A=1;
      lstride_A=m;
    } else {
      istride_A=k;
      lstride_A=1;
    }
    if (tB == 'N' || tB == 'n'){
      jstride_B=k;
      lstride_B=1;
    } else {
      jstride_B=1;
      lstride_B=n;
    }
    for (j=0; j<n; j++){
      for (i=0; i<m; i++){
        C[j*m+i] *= beta;
        for (l=0; l<k; l++){
          C[j*m+i] += alpha*A[istride_A*i+lstride_A*l]*B[lstride_B*l+jstride_B*j];
        }
      }
    }
    //TAU_FSTOP(default_gemm);
  }

  /** \brief build an array of ngrp pointers into contiguous data, one per
   *  group of grp_sz elements (caller owns/frees the returned array) */
  template<typename dtype>
  dtype ** get_grp_ptrs(int64_t       grp_sz,
                        int64_t       ngrp,
                        dtype const * data){
    dtype ** data_ptrs = (dtype**)malloc(sizeof(dtype*)*ngrp);
#ifdef _OPENMP
    #pragma omp parallel for
#endif
    for (int i=0; i<ngrp; i++){
      data_ptrs[i] = ((dtype*)data)+i*grp_sz;
    }
    return data_ptrs;
  }

  /* forward declarations of the (BLAS-backed) batched and plain GEMMs,
     defined elsewhere for the standard numeric types */
  template <typename dtype>
  void gemm_batch(char         taA,
                  char         taB,
                  int          l,
                  int          m,
                  int          n,
                  int          k,
                  dtype        alpha,
                  dtype const* A,
                  dtype const* B,
                  dtype        beta,
                  dtype *      C);

  template <typename dtype>
  void gemm(char          tA,
            char          tB,
            int           m,
            int           n,
            int           k,
            dtype         alpha,
            dtype const * A,
            dtype const * B,
            dtype         beta,
            dtype *       C);

  /* for BLAS-supported types, route default_gemm through CTF_int::gemm */
  template<>
  inline void default_gemm<float>
            (char           tA,
             char           tB,
             int            m,
             int            n,
             int            k,
             float          alpha,
             float  const * A,
             float  const * B,
             float          beta,
             float  *       C){
    CTF_int::gemm<float>(tA,tB,m,n,k,alpha,A,B,beta,C);
  }

  template<>
  inline void default_gemm<double>
            (char           tA,
             char           tB,
             int            m,
             int            n,
             int            k,
             double         alpha,
             double const * A,
             double const * B,
             double         beta,
             double *       C){
    CTF_int::gemm<double>(tA,tB,m,n,k,alpha,A,B,beta,C);
  }

  template<>
  inline void default_gemm< std::complex<float> >
            (char                        tA,
             char                        tB,
             int                         m,
             int                         n,
             int                         k,
             std::complex<float>         alpha,
             std::complex<float> const * A,
             std::complex<float> const * B,
             std::complex<float>         beta,
             std::complex<float> *       C){
    CTF_int::gemm< std::complex<float> >(tA,tB,m,n,k,alpha,A,B,beta,C);
  }

  template<>
  inline void default_gemm< std::complex<double> >
            (char                         tA,
             char                         tB,
             int                          m,
             int                          n,
             int                          k,
             std::complex<double>         alpha,
             std::complex<double> const * A,
             std::complex<double> const * B,
             std::complex<double>         beta,
             std::complex<double> *       C){
    CTF_int::gemm< std::complex<double> >(tA,tB,m,n,k,alpha,A,B,beta,C);
  }

  /** \brief reference batched GEMM over l independent m-by-n problems;
   *  the 1x1x1 case reduces to an elementwise fused multiply-add */
  template<typename dtype>
  void default_gemm_batch
                 (char         taA,
                  char         taB,
                  int          l,
                  int          m,
                  int          n,
                  int          k,
                  dtype        alpha,
                  dtype const* A,
                  dtype const* B,
                  dtype        beta,
                  dtype *      C){
    if (m == 1 && n == 1 && k == 1){
      for (int i=0; i<l; i++){
        C[i] = C[i]*beta + alpha*A[i]*B[i];
      }
    } else {
      for (int i=0; i<l; i++){
        default_gemm<dtype>(taA, taB, m, n, k, alpha, A+i*m*k, B+i*k*n, beta, C+i*m*n);
      }
    }
  }

  /* for BLAS-supported types, route through CTF_int::gemm_batch */
  template<>
  inline void default_gemm_batch<float>
                 (char         taA,
                  char         taB,
                  int          l,
                  int          m,
                  int          n,
                  int          k,
                  float        alpha,
                  float const* A,
                  float const* B,
                  float        beta,
                  float *      C){
    CTF_int::gemm_batch<float>(taA, taB, l, m, n, k, alpha, A, B, beta, C);
  }

  template<>
  inline void default_gemm_batch<double>
                 (char          taA,
                  char          taB,
                  int           l,
                  int           m,
                  int           n,
                  int           k,
                  double        alpha,
                  double const* A,
                  double const* B,
                  double        beta,
                  double *      C){
    CTF_int::gemm_batch<double>(taA, taB, l, m, n, k, alpha, A, B, beta, C);
  }

  template<>
  inline void default_gemm_batch<std::complex<float>>
                 (char                       taA,
                  char                       taB,
                  int                        l,
                  int                        m,
                  int                        n,
                  int                        k,
                  std::complex<float>        alpha,
                  std::complex<float> const* A,
                  std::complex<float> const* B,
                  std::complex<float>        beta,
                  std::complex<float> *      C){
    CTF_int::gemm_batch< std::complex<float> >(taA, taB, l, m, n, k, alpha, A, B, beta, C);
  }

  template<>
  inline void default_gemm_batch<std::complex<double>>
                 (char                        taA,
                  char                        taB,
                  int                         l,
                  int                         m,
                  int                         n,
                  int                         k,
                  std::complex<double>        alpha,
                  std::complex<double> const* A,
                  std::complex<double> const* B,
                  std::complex<double>        beta,
                  std::complex<double> *      C){
    CTF_int::gemm_batch< std::complex<double> >(taA, taB, l, m, n, k, alpha, A, B, beta, C);
  }

  /** \brief reference sparse(COO) x dense matrix product:
   *  C = alpha*A*B + beta*C, with A given by 1-based (rows_A, cols_A)
   *  coordinate lists of length nnz_A */
  template <typename dtype>
  void default_coomm
                 (int           m,
                  int           n,
                  int           k,
                  dtype         alpha,
                  dtype const * A,
                  int const *   rows_A,
                  int const *   cols_A,
                  int           nnz_A,
                  dtype const * B,
                  dtype         beta,
                  dtype *       C){
    //TAU_FSTART(default_coomm);
    for (int j=0; j<n; j++){
      for (int i=0; i<m; i++){
        C[j*m+i] *= beta;
      }
    }
    for (int i=0; i<nnz_A; i++){
      /* COO indices are 1-based */
      int row_A = rows_A[i]-1;
      int col_A = cols_A[i]-1;
      for (int col_C=0; col_C<n; col_C++){
        C[col_C*m+row_A] += alpha*A[i]*B[col_C*k+col_A];
      }
    }
    //TAU_FSTOP(default_coomm);
  }

  /* specializations using a BLAS-style COO kernel, defined elsewhere */
  template <>
  void default_coomm< float >
(int,int,int,float,float const *,int const *,int const *,int,float const *,float,float *); template <> void default_coomm< double > (int,int,int,double,double const *,int const *,int const *,int,double const *,double,double *); template <> void default_coomm< std::complex<float> > (int,int,int,std::complex<float>,std::complex<float> const *,int const *,int const *,int,std::complex<float> const *,std::complex<float>,std::complex<float> *); template <> void default_coomm< std::complex<double> > (int,int,int,std::complex<double>,std::complex<double> const *,int const *,int const *,int,std::complex<double> const *,std::complex<double>,std::complex<double> *); } namespace CTF { /** * \addtogroup algstrct * @{ */ /** * \brief Semiring is a Monoid with an addition multiplicaton function * addition must have an identity and be associative, does not need to be commutative * multiplications must have an identity as well as be distributive and associative * special case (parent) of a Ring (which also has an additive inverse) */ template <typename dtype=double, bool is_ord=CTF_int::get_default_is_ord<dtype>()> class Semiring : public Monoid<dtype, is_ord> { public: bool is_def; dtype tmulid; void (*fscal)(int,dtype,dtype*,int); void (*faxpy)(int,dtype,dtype const*,int,dtype*,int); dtype (*fmul)(dtype a, dtype b); void (*fvmul)(dtype const * a, dtype const * b, dtype * c, int64_t n); void (*fgemm)(char,char,int,int,int,dtype,dtype const*,dtype const*,dtype,dtype*); void (*fcoomm)(int,int,int,dtype,dtype const*,int const*,int const*,int,dtype const*,dtype,dtype*); void (*fgemm_batch)(char,char,int,int,int,int,dtype,dtype const*,dtype const*,dtype,dtype*); //void (*fcsrmm)(int,int,int,dtype,dtype const*,int const*,int const*,dtype const*,dtype,dtype*); //csrmultd_ kernel for multiplying two sparse matrices into a dense output //void (*fcsrmultd)(int,int,int,dtype const*,int const*,int const*,dtype const*,int const*, int const*,dtype*,int); Semiring(Semiring const & other) : 
Monoid<dtype, is_ord>(other) { this->tmulid = other.tmulid; this->fscal = other.fscal; this->faxpy = other.faxpy; this->fmul = other.fmul; this->fvmul = other.fvmul; this->fgemm = other.fgemm; this->fcoomm = other.fcoomm; this->is_def = other.is_def; this->fgemm_batch = other.fgemm_batch; } virtual CTF_int::algstrct * clone() const { return new Semiring<dtype, is_ord>(*this); } /** * \brief constructor for algstrct equipped with * and + * \param[in] addid_ additive identity * \param[in] fadd_ binary addition function * \param[in] addmop_ MPI_Op operation for addition * \param[in] mulid_ multiplicative identity * \param[in] fmul_ binary multiplication function * \param[in] fvmul_ binary vector multiplication function * \param[in] gemm_ block matrix multiplication function * \param[in] axpy_ vector sum function * \param[in] scal_ vector scale function * \param[in] coomm_ kernel for multiplying sparse matrix in coordinate format with dense matrix */ Semiring(dtype addid_, dtype (*fadd_)(dtype a, dtype b), MPI_Op addmop_, dtype mulid_, dtype (*fmul_)(dtype a, dtype b), void (*gemm_)(char,char,int,int,int,dtype,dtype const*,dtype const*,dtype,dtype*)=NULL, void (*axpy_)(int,dtype,dtype const*,int,dtype*,int)=NULL, void (*scal_)(int,dtype,dtype*,int)=NULL, void (*coomm_)(int,int,int,dtype,dtype const*,int const*,int const*,int,dtype const*,dtype,dtype*)=NULL, void (*fgemm_batch_)(char,char,int,int,int,int,dtype,dtype const*,dtype const*,dtype,dtype*)=NULL, void (*fvmul_)(dtype const * a, dtype const * b, dtype * c, int64_t n)=NULL) : Monoid<dtype, is_ord>(addid_, fadd_, addmop_) { fmul = fmul_; fvmul = fvmul_; fgemm = gemm_; faxpy = axpy_; fscal = scal_; fcoomm = coomm_; fgemm_batch = fgemm_batch_; tmulid = mulid_; // if provided a coordinate MM kernel, don't use CSR this->has_coo_ker = (coomm_ != NULL); is_def = false; } /** * \brief constructor for algstrct equipped with + only */ Semiring() : Monoid<dtype,is_ord>() { tmulid = dtype(1); fmul = 
&CTF_int::default_mul<dtype>; fvmul = &CTF_int::default_vec_mul<dtype>; fgemm = &CTF_int::default_gemm<dtype>; faxpy = &CTF_int::default_axpy<dtype>; fscal = &CTF_int::default_scal<dtype>; fcoomm = &CTF_int::default_coomm<dtype>; fgemm_batch = &CTF_int::default_gemm_batch<dtype>; is_def = true; } void mul(char const * a, char const * b, char * c) const { ((dtype*)c)[0] = fmul(((dtype*)a)[0],((dtype*)b)[0]); } void safemul(char const * a, char const * b, char *& c) const { if (a == NULL && b == NULL){ if (c!=NULL) CTF_int::cdealloc(c); c = NULL; } else if (a == NULL) { if (c==NULL) c = (char*)CTF_int::alloc(this->el_size); memcpy(c,b,this->el_size); } else if (b == NULL) { if (c==NULL) c = (char*)CTF_int::alloc(this->el_size); memcpy(c,b,this->el_size); } else { if (c==NULL) c = (char*)CTF_int::alloc(this->el_size); ((dtype*)c)[0] = fmul(((dtype*)a)[0],((dtype*)b)[0]); } } char const * mulid() const { return (char const *)&tmulid; } bool has_mul() const { return true; } /** \brief X["i"]=alpha*X["i"]; */ void scal(int n, char const * alpha, char * X, int incX) const { if (fscal != NULL) fscal(n, ((dtype const *)alpha)[0], (dtype *)X, incX); else { dtype const a = ((dtype*)alpha)[0]; dtype * dX = (dtype*) X; for (int64_t i=0; i<n; i++){ dX[i] = fmul(a,dX[i]); } } } /** \brief Y["i"]+=alpha*X["i"]; */ void axpy(int n, char const * alpha, char const * X, int incX, char * Y, int incY) const { if (faxpy != NULL) faxpy(n, ((dtype const *)alpha)[0], (dtype const *)X, incX, (dtype *)Y, incY); else { assert(incX==1); assert(incY==1); dtype a = ((dtype*)alpha)[0]; dtype const * dX = (dtype*) X; dtype * dY = (dtype*) Y; for (int64_t i=0; i<n; i++){ dY[i] = this->fadd(fmul(a,dX[i]), dY[i]); } } } /** \brief beta*C["ij"]=alpha*A^tA["ik"]*B^tB["kj"]; */ void gemm(char tA, char tB, int m, int n, int k, char const * alpha, char const * A, char const * B, char const * beta, char * C) const { if (fgemm != NULL) { fgemm(tA, tB, m, n, k, ((dtype const *)alpha)[0], (dtype const *)A, 
(dtype const *)B, ((dtype const *)beta)[0], (dtype *)C); } else { //TAU_FSTART(sring_gemm); dtype const * dA = (dtype const *) A; dtype const * dB = (dtype const *) B; dtype * dC = (dtype*) C; if (!this->isequal(beta, this->mulid())){ scal(m*n, beta, C, 1); } int lda_Cj, lda_Ci, lda_Al, lda_Ai, lda_Bj, lda_Bl; lda_Cj = m; lda_Ci = 1; if (tA == 'N'){ lda_Al = m; lda_Ai = 1; } else { assert(tA == 'T'); lda_Al = 1; lda_Ai = k; } if (tB == 'N'){ lda_Bj = k; lda_Bl = 1; } else { assert(tB == 'T'); lda_Bj = 1; lda_Bl = n; } if (!this->isequal(alpha, this->mulid())){ dtype a = ((dtype*)alpha)[0]; for (int64_t j=0; j<n; j++){ for (int64_t i=0; i<m; i++){ for (int64_t l=0; l<k; l++){ //dC[j*m+i] = this->fadd(fmul(a,fmul(dA[l*m+i],dB[j*k+l])), dC[j*m+i]); dC[j*lda_Cj+i*lda_Ci] = this->fadd(fmul(a,fmul(dA[l*lda_Al+i*lda_Ai],dB[j*lda_Bj+l*lda_Bl])), dC[j*lda_Cj+i*lda_Ci]); } } } } else { for (int64_t j=0; j<n; j++){ for (int64_t i=0; i<m; i++){ for (int64_t l=0; l<k; l++){ //dC[j*m+i] = this->fadd(fmul(a,fmul(dA[l*m+i],dB[j*k+l])), dC[j*m+i]); dC[j*lda_Cj+i*lda_Ci] = this->fadd(fmul(dA[l*lda_Al+i*lda_Ai],dB[j*lda_Bj+l*lda_Bl]), dC[j*lda_Cj+i*lda_Ci]); } } } } //TAU_FSTOP(sring_gemm); } } void gemm_batch(char tA, char tB, int l, int m, int n, int k, char const * alpha, char const * A, char const * B, char const * beta, char * C) const { if (fgemm_batch != NULL) { fgemm_batch(tA, tB, l, m, n, k, ((dtype const *)alpha)[0], ((dtype const *)A), ((dtype const *)B), ((dtype const *)beta)[0], ((dtype *)C)); } else { for (int i=0; i<l; i++){ gemm(tA, tB, m, n, k, alpha, A+m*k*i*sizeof(dtype), B+k*n*i*sizeof(dtype), beta, C+m*n*i*sizeof(dtype)); } } } void offload_gemm(char tA, char tB, int m, int n, int k, char const * alpha, char const * A, char const * B, char const * beta, char * C) const { printf("CTF ERROR: offload gemm not present for this semiring\n"); assert(0); } bool is_offloadable() const { return false; } void coomm(int m, int n, int k, char const * alpha, char const * A, 
int const * rows_A, int const * cols_A, int64_t nnz_A, char const * B, char const * beta, char * C, CTF_int::bivar_function const * func) const { if (func == NULL && alpha != NULL && fcoomm != NULL){ fcoomm(m, n, k, ((dtype const *)alpha)[0], (dtype const *)A, rows_A, cols_A, nnz_A, (dtype const *)B, ((dtype const *)beta)[0], (dtype *)C); return; } if (func == NULL && alpha != NULL && this->isequal(beta,mulid())){ //TAU_FSTART(func_coomm); dtype const * dA = (dtype const*)A; dtype const * dB = (dtype const*)B; dtype * dC = (dtype*)C; dtype a = ((dtype*)alpha)[0]; if (!this->isequal(beta, this->mulid())){ scal(m*n, beta, C, 1); } for (int64_t i=0; i<nnz_A; i++){ int row_A = rows_A[i]-1; int col_A = cols_A[i]-1; for (int col_C=0; col_C<n; col_C++){ dC[col_C*m+row_A] = this->fadd(fmul(a,fmul(dA[i],dB[col_C*k+col_A])), dC[col_C*m+row_A]); } } //TAU_FSTOP(func_coomm); } else { assert(0); } } void gen_csrmm (int m, int n, int k, dtype alpha, dtype const * A, int const * JA, int const * IA, int nnz_A, dtype const * B, dtype beta, dtype * C) const { #ifdef _OPENMP #pragma omp parallel for #endif for (int row_A=0; row_A<m; row_A++){ #ifdef _OPENMP #pragma omp parallel for #endif for (int col_B=0; col_B<n; col_B++){ C[col_B*m+row_A] = this->fmul(beta,C[col_B*m+row_A]); if (IA[row_A] < IA[row_A+1]){ int i_A1 = IA[row_A]-1; int col_A1 = JA[i_A1]-1; dtype tmp = this->fmul(A[i_A1],B[col_B*k+col_A1]); for (int i_A=IA[row_A]; i_A<IA[row_A+1]-1; i_A++){ int col_A = JA[i_A]-1; tmp = this->fadd(tmp, this->fmul(A[i_A],B[col_B*k+col_A])); } C[col_B*m+row_A] = this->fadd(C[col_B*m+row_A], this->fmul(alpha,tmp)); } } } } void default_csrmm (int m, int n, int k, dtype alpha, dtype const * A, int const * JA, int const * IA, int nnz_A, dtype const * B, dtype beta, dtype * C) const { gen_csrmm(m,n,k,alpha,A,JA,IA,nnz_A,B,beta,C); } // void (*fcsrmultd)(int,int,int,dtype const*,int const*,int const*,dtype const*,int const*, int const*,dtype*,int); /** \brief sparse version of gemm using CSR 
format for A */ void csrmm(int m, int n, int k, char const * alpha, char const * A, int const * JA, int const * IA, int64_t nnz_A, char const * B, char const * beta, char * C, CTF_int::bivar_function const * func) const { assert(!this->has_coo_ker); assert(func == NULL); if (is_def) this->default_csrmm(m,n,k,((dtype*)alpha)[0],(dtype*)A,JA,IA,nnz_A,(dtype*)B,((dtype*)beta)[0],(dtype*)C); else this->gen_csrmm(m,n,k,((dtype*)alpha)[0],(dtype*)A,JA,IA,nnz_A,(dtype*)B,((dtype*)beta)[0],(dtype*)C); } bool is_last_col_zero(int64_t m, int64_t n, dtype const * M) const { for (int64_t i=0; i<m; i++){ if (!this->isequal((char*)(M+(m*(n-1)+i)), (char*)&this->taddid)) return false; } return true; } void gen_ccsrmm (int64_t m, int64_t n0, int64_t k, int64_t nnz_row, dtype alpha, dtype const * A, int const * JA, int const * IA, int64_t const * row_enc, int64_t nnz_A, dtype const * B, dtype beta, char *& C_CCSR) const { CTF_int::CCSR_Matrix M; int64_t n = n0; if (this->is_last_col_zero(k, n, B)){ n = n0-1; } if (n == 0){ M = CTF_int::CCSR_Matrix(0, 0, m, 1, this); if (C_CCSR != NULL && !this->isequal((char const *)&beta, this->addid())){ CTF_int::CCSR_Matrix C(C_CCSR); if (!this->isequal((char const *)&beta, this->mulid())) this->scal(C.nnz(), (char*)&beta, C.all_data, 1); C_CCSR = CTF_int::CCSR_Matrix::ccsr_add(C.all_data, M.all_data, this); CTF_int::cdealloc(M.all_data); } else { //CTF_int::cdealloc(C_CCSR); C_CCSR = M.all_data; } return; } if (nnz_row == 0){ M = CTF_int::CCSR_Matrix(nnz_row*n, nnz_row, m, n, this); } else { int new_order[2] = {1, 0}; int64_t lens[2] = {(int64_t)nnz_row, (int64_t)n}; bool use_hptt = CTF_int::hptt_is_applicable(2, new_order, this->el_size); //Note: if there is padding last column of dense matrix would be full of zeros and we don't want to generate nonzeros for this colum, as this will cause tricky bugs! 
if (use_hptt){ char * data = this->alloc(((int64_t)nnz_row)*n); this->init_shell(((int64_t)nnz_row)*n, data); csrmm(nnz_row,n,k,(char const *)&alpha, (char const *)A, JA, IA, nnz_A, (char const*)B, this->mulid(), data, NULL); M = CTF_int::CCSR_Matrix(((int64_t)nnz_row)*n, nnz_row, m, n0, this); CTF_int::nosym_transpose_hptt(2, new_order, lens, 1, data, M.vals(), this); this->dealloc(data); } else { M = CTF_int::CCSR_Matrix(((int64_t)nnz_row)*n, nnz_row, m, n0, this); csrmm(nnz_row,n,k,(char const *)&alpha, (char const *)A, JA, IA, nnz_A, (char const*)B, this->mulid(), M.vals(), NULL); CTF_int::nosym_transpose(2,new_order,lens,M.vals(),1,this); } } memcpy(M.nnz_row_encoding(), row_enc, nnz_row*sizeof(int64_t)); int * C_IA = M.IA(); C_IA[0] = 1; for (int64_t row_A=1; row_A<nnz_row+1; row_A++){ C_IA[row_A] = C_IA[row_A-1] + n; } int * C_JA = M.JA(); #ifdef _OPENMP #pragma omp parallel for #endif for (int64_t row_C=0; row_C<nnz_row; row_C++){ #ifdef _OPENMP #pragma omp parallel for #endif for (int64_t col_C=0; col_C<n; col_C++){ C_JA[row_C*n+col_C] = col_C+1; } } if (C_CCSR != NULL && !this->isequal((char const *)&beta, this->addid())){ CTF_int::CCSR_Matrix C(C_CCSR); if (!this->isequal((char const *)&beta, this->mulid())) this->scal(C.nnz(), (char*)&beta, C.all_data, 1); C_CCSR = CTF_int::CCSR_Matrix::ccsr_add(C.all_data, M.all_data, this); CTF_int::cdealloc(M.all_data); } else { //CTF_int::cdealloc(C_CCSR); C_CCSR = M.all_data; } } void default_ccsrmm (int64_t m, int64_t n, int64_t k, int64_t nnz_row, dtype alpha, dtype const * A, int const * JA, int const * IA, int64_t const * row_enc, int64_t nnz_A, dtype const * B, dtype beta, char *& C) const { gen_ccsrmm(m,n,k,nnz_row,alpha,A,JA,IA,row_enc,nnz_A,B,beta,C); } // void (*fccsrmultd)(int,int,int,dtype const*,int const*,int const*,dtype const*,int const*, int const*,dtype*,int); /** \brief sparse version of gemm using CSR format for A */ void ccsrmm(int64_t m, int64_t n, int64_t k, int64_t nnz_row, char const * 
alpha,
              char const * A,
              int const * JA,
              int const * IA,
              int64_t const * row_enc,
              int64_t nnz_A,
              char const * B,
              char const * beta,
              char *& C,
              CTF_int::bivar_function const * func) const {
      // Dispatch CCSR sparse-times-dense multiply to the default (typed) or
      // generic implementation; custom elementwise functions are unsupported here.
      assert(!this->has_coo_ker);
      assert(func == NULL);
      if (is_def)
        this->default_ccsrmm(m,n,k,nnz_row,((dtype*)alpha)[0],(dtype*)A,JA,IA,row_enc,nnz_A,(dtype*)B,((dtype*)beta)[0],C);
      else
        this->gen_ccsrmm(m,n,k,nnz_row,((dtype*)alpha)[0],(dtype*)A,JA,IA,row_enc,nnz_A,(dtype*)B,((dtype*)beta)[0],C);
    }

    // C = alpha*A*B + beta*C where A (m-by-k) and B (k-by-n) are CSR with
    // 1-based index arrays (JA/IA, JB/IB) and C is dense column-major.
    void gen_csrmultd
               (int           m,
                int           n,
                int           k,
                dtype         alpha,
                dtype const * A,
                int const *   JA,
                int const *   IA,
                int           nnz_A,
                dtype const * B,
                int const *   JB,
                int const *   IB,
                int           nnz_B,
                dtype         beta,
                dtype *       C) const {
      // Pre-scale C by beta unless beta is the multiplicative identity.
      if (!this->isequal((char const*)&beta, this->mulid())){
        this->scal(m*n, (char const *)&beta, (char*)C, 1);
      }
      #ifdef _OPENMP
      #pragma omp parallel for
      #endif
      for (int row_A=0; row_A<m; row_A++){
        for (int i_A=IA[row_A]-1; i_A<IA[row_A+1]-1; i_A++){
          int row_B = JA[i_A]-1; //=col_A
          for (int i_B=IB[row_B]-1; i_B<IB[row_B+1]-1; i_B++){
            int col_B = JB[i_B]-1;
            // NOTE(review): the result of this->fadd(...) is discarded on both
            // branches below, so C is never actually accumulated. Sibling code
            // in this class assigns the result (e.g. axpy: dY[i] = this->fadd(...),
            // gen_csrmm: C[...] = this->fadd(C[...], ...)). This looks like it
            // should read C[col_B*m+row_A] = this->fadd(C[col_B*m+row_A], ...);
            // verify against the upstream implementation before fixing.
            if (!this->isequal((char const*)&alpha, this->mulid()))
              this->fadd(C[col_B*m+row_A], this->fmul(alpha,this->fmul(A[i_A],B[i_B])));
            else
              this->fadd(C[col_B*m+row_A], this->fmul(A[i_A],B[i_B]));
          }
        }
      }
    }

    // Typed entry point for CSR-times-dense; forwards to the generic kernel.
    void default_csrmultd
               (int           m,
                int           n,
                int           k,
                dtype         alpha,
                dtype const * A,
                int const *   JA,
                int const *   IA,
                int           nnz_A,
                dtype const * B,
                int const *   JB,
                int const *   IB,
                int           nnz_B,
                dtype         beta,
                dtype *       C) const {
      gen_csrmultd(m,n,k,alpha,A,JA,IA,nnz_A,B,JB,IB,nnz_B,beta,C);
    }

    // C_CSR = alpha*A*B + beta*C_CSR with A, B, and the result all in CSR form
    // (1-based indices). First pass below counts nonzeros per output row.
    void gen_csrmultcsr
               (int          m,
                int          n,
                int          k,
                dtype        alpha,
                dtype const * A, // A m by k
                int const *  JA,
                int const *  IA,
                int          nnz_A,
                dtype const * B, // B k by n
                int const *  JB,
                int const *  IB,
                int          nnz_B,
                dtype        beta,
                char *&      C_CSR) const {
      int * IC = (int*)CTF_int::alloc(sizeof(int)*(m+1));
      memset(IC, 0, sizeof(int)*(m+1));
      #ifdef _OPENMP
      #pragma omp parallel
      {
      #endif
        int * has_col = (int*)CTF_int::alloc(sizeof(int)*(n+1)); //n is the num of col of B
        int nnz = 0;
        #ifdef _OPENMP
        #pragma
omp for schedule(dynamic) // TO DO test other strategies #endif for (int i=0; i<m; i++){ memset(has_col, 0, sizeof(int)*(n+1)); nnz = 0; for (int j=0; j<IA[i+1]-IA[i]; j++){ int row_B = JA[IA[i]+j-1]-1; for (int kk=0; kk<IB[row_B+1]-IB[row_B]; kk++){ int idx_B = IB[row_B]+kk-1; if (has_col[JB[idx_B]] == 0){ nnz++; has_col[JB[idx_B]] = 1; } } IC[i+1]=nnz; } } CTF_int::cdealloc(has_col); #ifdef _OPENMP } // END PARALLEL #endif int ic_prev = 1; for(int i=0;i < m+1; i++){ ic_prev += IC[i]; IC[i] = ic_prev; } CTF_int::CSR_Matrix C(IC[m]-1, m, n, this); dtype * vC = (dtype*)C.vals(); this->set((char *)vC, this->addid(), IC[m]+1); int * JC = C.JA(); memcpy(C.IA(), IC, sizeof(int)*(m+1)); CTF_int::cdealloc(IC); IC = C.IA(); #ifdef _OPENMP #pragma omp parallel { #endif int ins = 0; int *dcol = (int *) CTF_int::alloc(n*sizeof(int)); dtype *acc_data = (dtype*)CTF_int::alloc(n*sizeof (dtype)); #ifdef _OPENMP #pragma omp for #endif for (int i=0; i<m; i++){ std::fill(acc_data, acc_data+n, this->taddid); memset(dcol, 0, sizeof(int)*(n)); ins = 0; for (int j=0; j<IA[i+1]-IA[i]; j++){ int row_b = JA[IA[i]+j-1]-1; // 1-based int idx_a = IA[i]+j-1; for (int ii = 0; ii < IB[row_b+1]-IB[row_b]; ii++){ int col_b = IB[row_b]+ii-1; int col_c = JB[col_b]-1; // 1-based dtype val = fmul(A[idx_a], B[col_b]); if (dcol[col_c] == 0){ dcol[col_c] = JB[col_b]; } //acc_data[col_c] += val; acc_data[col_c]= this->fadd(acc_data[col_c], val); } } for(int jj = 0; jj < n; jj++){ if (dcol[jj] != 0){ JC[IC[i]+ins-1] = dcol[jj]; vC[IC[i]+ins-1] = acc_data[jj]; ++ins; } } } CTF_int::cdealloc(dcol); CTF_int::cdealloc(acc_data); #ifdef _OPENMP } //PRAGMA END #endif CTF_int::CSR_Matrix C_in(C_CSR); if (!this->isequal((char const *)&alpha, this->mulid())){ this->scal(C.nnz(), (char const *)&alpha, C.vals(), 1); } if (C_CSR == NULL || C_in.nnz() == 0 || this->isequal((char const *)&beta, this->addid())){ C_CSR = C.all_data; } else { if (!this->isequal((char const *)&beta, this->mulid())){ this->scal(C_in.nnz(), 
(char const *)&beta, C_in.vals(), 1); } char * ans = this->csr_add(C_CSR, C.all_data, false); CTF_int::cdealloc(C.all_data); C_CSR = ans; } } /* void gen_csrmultcsr_old (int m, int n, int k, dtype alpha, dtype const * A, int const * JA, int const * IA, int nnz_A, dtype const * B, int const * JB, int const * IB, int nnz_B, dtype beta, char *& C_CSR) const { int * IC = (int*)CTF_int::alloc(sizeof(int)*(m+1)); int * has_col = (int*)CTF_int::alloc(sizeof(int)*n); IC[0] = 1; for (int i=0; i<m; i++){ memset(has_col, 0, sizeof(int)*n); IC[i+1] = IC[i]; CTF_int::CSR_Matrix::compute_has_col(JA, IA, JB, IB, i, has_col); for (int j=0; j<n; j++){ IC[i+1] += has_col[j]; } } CTF_int::CSR_Matrix C(IC[m]-1, m, n, sizeof(dtype)); dtype * vC = (dtype*)C.vals(); this->set((char *)vC, this->addid(), IC[m]-1); int * JC = C.JA(); memcpy(C.IA(), IC, sizeof(int)*(m+1)); CTF_int::cdealloc(IC); IC = C.IA(); int64_t * rev_col = (int64_t*)CTF_int::alloc(sizeof(int64_t)*n); for (int i=0; i<m; i++){ memset(has_col, 0, sizeof(int)*n); CTF_int::CSR_Matrix::compute_has_col(JA, IA, JB, IB, i, has_col); int vs = 0; for (int j=0; j<n; j++){ if (has_col[j]){ JC[IC[i]+vs-1] = j+1; rev_col[j] = IC[i]+vs-1; vs++; } } for (int j=0; j<IA[i+1]-IA[i]; j++){ int row_B = JA[IA[i]+j-1]-1; int idx_A = IA[i]+j-1; for (int l=0; l<IB[row_B+1]-IB[row_B]; l++){ int idx_B = IB[row_B]+l-1; dtype tmp = fmul(A[idx_A],B[idx_B]); vC[(rev_col[JB[idx_B]-1])] = this->fadd(vC[(rev_col[JB[idx_B]-1])], tmp); } } } CTF_int::CSR_Matrix C_in(C_CSR); if (!this->isequal((char const *)&alpha, this->mulid())){ this->scal(C.nnz(), (char const *)&alpha, C.vals(), 1); } if (C_CSR == NULL || C_in.nnz() == 0 || this->isequal((char const *)&beta, this->addid())){ C_CSR = C.all_data; } else { if (!this->isequal((char const *)&beta, this->mulid())){ this->scal(C_in.nnz(), (char const *)&beta, C_in.vals(), 1); } char * ans = this->csr_add(C_CSR, C.all_data); CTF_int::cdealloc(C.all_data); C_CSR = ans; } CTF_int::cdealloc(has_col); 
CTF_int::cdealloc(rev_col); }*/ void default_csrmultcsr (int m, int n, int k, dtype alpha, dtype const * A, int const * JA, int const * IA, int nnz_A, dtype const * B, int const * JB, int const * IB, int nnz_B, dtype beta, char *& C_CSR) const { this->gen_csrmultcsr(m,n,k,alpha,A,JA,IA,nnz_A,B,JB,IB,nnz_B,beta,C_CSR); } void csrmultd (int m, int n, int k, char const * alpha, char const * A, int const * JA, int const * IA, int64_t nnz_A, char const * B, int const * JB, int const * IB, int64_t nnz_B, char const * beta, char * C) const { if (is_def) this->default_csrmultd(m,n,k,((dtype const*)alpha)[0],(dtype const*)A,JA,IA,nnz_A,(dtype const*)B,JB,IB,nnz_B,((dtype const*)beta)[0],(dtype*)C); else this->gen_csrmultd(m,n,k,((dtype const*)alpha)[0],(dtype const*)A,JA,IA,nnz_A,(dtype const*)B,JB,IB,nnz_B,((dtype const*)beta)[0],(dtype*)C); } void csrmultcsr (int m, int n, int k, char const * alpha, char const * A, int const * JA, int const * IA, int64_t nnz_A, char const * B, int const * JB, int const * IB, int64_t nnz_B, char const * beta, char *& C_CSR) const { if (is_def){ this->default_csrmultcsr(m,n,k,((dtype const*)alpha)[0],(dtype const*)A,JA,IA,nnz_A,(dtype const*)B,JB,IB,nnz_B,((dtype const*)beta)[0],C_CSR); } else { this->gen_csrmultcsr(m,n,k,((dtype const*)alpha)[0],(dtype const*)A,JA,IA,nnz_A,(dtype const*)B,JB,IB,nnz_B,((dtype const*)beta)[0],C_CSR); } } void accumulate_local_slice(int order, int64_t * lens, int64_t * lens_slice, int const * sym, int64_t const * offsets, int64_t const * ends, char const * slice_data, char const * alpha, char * tensor_data, char const * beta) const { dtype const * sdata = (dtype const*)slice_data; dtype * tdata = (dtype*)tensor_data; if (order == 1){ dtype a = ((dtype*)alpha)[0]; dtype b = ((dtype*)beta)[0]; for (int64_t i=offsets[0]; i<ends[0]; i++){ tdata[i] = this->fadd(this->fmul(b,tdata[i]),this->fmul(a,sdata[i-offsets[0]])); } } else { int64_t lda_tensor = 1; int64_t lda_slice = 1; for (int64_t i=0; i<order-1; i++){ 
lda_tensor *= lens[i]; lda_slice *= lens_slice[i]; } for (int64_t i=offsets[order-1]; i<ends[order-1]; i++){ this->accumulate_local_slice(order-1, lens, lens_slice, sym, offsets, ends, (char const*)(sdata + (i-offsets[order-1])*lda_slice), alpha, (char *)(tdata + i*lda_tensor), beta); } } } void MTTKRP(int order, int64_t * lens, int * phys_phase, int64_t k, int64_t nnz, int out_mode, bool aux_mode_first, CTF::Pair<dtype> const * tsr_data, dtype const * const * op_mats, dtype * out_mat){ if (aux_mode_first){ dtype * buffer = (dtype*)this->alloc(k); dtype * out_buffer; if (out_mode != 0) out_buffer = (dtype*)this->alloc(k); int64_t * inds = (int64_t*)malloc(sizeof(int64_t)*(order-1)); int64_t idx = 0; while (idx < nnz){ int64_t fiber_idx = tsr_data[idx].k/lens[0]; int64_t fi = fiber_idx; for (int i=0; i<order-1; i++){ inds[i] = (fi % lens[i+1])/phys_phase[i+1]; fi = fi / lens[i+1]; } int64_t fiber_nnz = 1; while (idx+fiber_nnz < nnz && tsr_data[idx+fiber_nnz].k/lens[0] == fiber_idx) fiber_nnz++; if (out_mode == 0){ memcpy(buffer, op_mats[1] + inds[0]*k, k*sizeof(dtype)); for (int i=1; i<order-1; i++){ fvmul(buffer, op_mats[i+1]+inds[i]*k, buffer, k); } for (int64_t i=idx; i<idx+fiber_nnz; i++){ int64_t kk = (tsr_data[i].k%lens[0])/phys_phase[0]; this->faxpy(k, tsr_data[i].d, buffer, 1, out_mat+kk*k, 1); } } else { int64_t ok = inds[out_mode-1]; if (out_mode > 1) memcpy(buffer, op_mats[1] + inds[0]*k, k*sizeof(dtype)); else if (order > 2) memcpy(buffer, op_mats[2] + inds[1]*k, k*sizeof(dtype)); else std::fill(buffer, buffer+k, this->tmulid); for (int i=1+(out_mode==1); i<order-1; i++){ if (out_mode != i+1) fvmul(buffer, op_mats[i+1] + inds[i]*k, buffer, k); } std::fill(out_buffer, out_buffer+k, this->taddid); for (int64_t i=idx; i<idx+fiber_nnz; i++){ int64_t kk = (tsr_data[i].k%lens[0])/phys_phase[0]; this->faxpy(k, tsr_data[i].d, op_mats[0] + kk*k, 1, out_buffer, 1); } fvmul(out_buffer, buffer, out_buffer, k); this->faxpy(k, this->tmulid, out_buffer, 1, out_mat + 
ok*k, 1); //for (int j=0; j<k; j++){ // out_mat[j+ok*k] += out_buffer[j]*buffer[j]; //} } idx += fiber_nnz; } if (out_mode != 0) this->dealloc((char*)out_buffer); this->dealloc((char*)buffer); free(inds); } else { IASSERT(0); } } void MTTKRP(int order, int64_t * lens, int * phys_phase, int64_t nnz, int out_mode, CTF::Pair<dtype> const * tsr_data, dtype const * const * op_vecs, dtype * out_vec){ int64_t * inds = (int64_t*)malloc(sizeof(int64_t)*(order-1)); int64_t idx = 0; while (idx < nnz){ int64_t fiber_idx = tsr_data[idx].k/lens[0]; int64_t fi = fiber_idx; for (int i=0; i<order-1; i++){ inds[i] = (fi % lens[i+1])/phys_phase[i+1]; fi = fi / lens[i+1]; } int64_t fiber_nnz = 1; while (idx+fiber_nnz < nnz && tsr_data[idx+fiber_nnz].k/lens[0] == fiber_idx) fiber_nnz++; if (out_mode == 0){ dtype buf_val = op_vecs[1][inds[0]]; for (int i=1; i<order-1; i++){ buf_val *= op_vecs[i+1][inds[i]]; } for (int64_t i=idx; i<idx+fiber_nnz; i++){ int64_t kk = (tsr_data[i].k%lens[0])/phys_phase[0]; out_vec[kk] += tsr_data[i].d*buf_val; } } else { int64_t ok = inds[out_mode-1]; dtype buf_val = op_vecs[1][inds[0]]; if (out_mode > 1) buf_val = op_vecs[1][inds[0]]; else if (order > 2) buf_val = op_vecs[2][inds[1]]; else buf_val = this->tmulid; for (int i=1+(out_mode==1); i<order-1; i++){ if (out_mode != i+1) buf_val *= op_vecs[i+1][inds[i]]; } dtype buf_val2 = this->taddid; for (int64_t i=idx; i<idx+fiber_nnz; i++){ int64_t kk = (tsr_data[i].k%lens[0])/phys_phase[0]; buf_val2 += tsr_data[i].d*op_vecs[0][kk]; } out_vec[ok] += buf_val*buf_val2; } idx += fiber_nnz; } free(inds); } }; /** * @} */ } namespace CTF { // TODO: add these with manual loop // template <> // bool CTF::Semiring<float,1>::is_last_col_zero(int64_t m, int64_t n, float const * M) const; template <> bool CTF::Semiring<double,1>::is_last_col_zero(int64_t m, int64_t n, double const * M) const; // template <> // bool CTF::Semiring<std::complex<float>,0>::is_last_col_zero(int64_t m, int64_t n, std::complex<float> const * M) 
const; // template <> // void CTF::Semiring<std::complex<double>,0>::is_last_col_zero(int64_t m, int64_t n, std::complex<double> const * M) const; template <> void CTF::Semiring<float,1>::default_csrmm(int,int,int,float,float const *,int const *,int const *,int,float const *,float,float *) const; template <> void CTF::Semiring<double,1>::default_csrmm(int,int,int,double,double const *,int const *,int const *,int,double const *,double,double *) const; template <> void CTF::Semiring<std::complex<float>,0>::default_csrmm(int,int,int,std::complex<float>,std::complex<float> const *,int const *,int const *,int,std::complex<float> const *,std::complex<float>,std::complex<float> *) const; template <> void CTF::Semiring<std::complex<double>,0>::default_csrmm(int,int,int,std::complex<double>,std::complex<double> const *,int const *,int const *,int,std::complex<double> const *,std::complex<double>,std::complex<double> *) const; template <> void CTF::Semiring<float,1>::default_csrmultd(int,int,int,float,float const *,int const *,int const *,int,float const *,int const *,int const *,int,float,float *) const; template <> void CTF::Semiring<double,1>::default_csrmultd(int,int,int,double,double const *,int const *,int const *,int,double const *,int const *,int const *,int,double,double *) const; template <> void CTF::Semiring<std::complex<float>,0>::default_csrmultd(int,int,int,std::complex<float>,std::complex<float> const *,int const *,int const *,int,std::complex<float> const *,int const *,int const *,int,std::complex<float>,std::complex<float> *) const; template <> void CTF::Semiring<std::complex<double>,0>::default_csrmultd(int,int,int,std::complex<double>,std::complex<double> const *,int const *,int const *,int,std::complex<double> const *,int const *,int const *,int,std::complex<double>,std::complex<double> *) const; template <> void CTF::Semiring<float,1>::default_csrmultcsr(int,int,int,float,float const *,int const *,int const *,int,float const *,int const *,int const 
*,int,float,char *&) const; template <> void CTF::Semiring<double,1>::default_csrmultcsr(int,int,int,double,double const *,int const *,int const *,int,double const *,int const *,int const *,int,double,char *&) const; template <> void CTF::Semiring<std::complex<float>,0>::default_csrmultcsr(int,int,int,std::complex<float>,std::complex<float> const *,int const *,int const *,int,std::complex<float> const *,int const *,int const *,int,std::complex<float>,char *&) const; template <> void CTF::Semiring<std::complex<double>,0>::default_csrmultcsr(int,int,int,std::complex<double>,std::complex<double> const *,int const *,int const *,int,std::complex<double> const *,int const *,int const *,int,std::complex<double>,char *&) const; template<> bool CTF::Semiring<double,1>::is_offloadable() const; template<> bool CTF::Semiring<float,1>::is_offloadable() const; template<> bool CTF::Semiring<std::complex<float>,0>::is_offloadable() const; template<> bool CTF::Semiring<std::complex<double>,0>::is_offloadable() const; template<> void CTF::Semiring<double,1>::offload_gemm(char,char,int,int,int,char const *,char const *,char const *,char const *,char *) const; template<> void CTF::Semiring<double,1>::offload_gemm(char,char,int,int,int,char const *,char const *,char const *,char const *,char *) const; template<> void CTF::Semiring<std::complex<float>,0>::offload_gemm(char,char,int,int,int,char const *,char const *,char const *,char const *,char *) const; template<> void CTF::Semiring<std::complex<double>,0>::offload_gemm(char,char,int,int,int,char const *,char const *,char const *,char const *,char *) const; } #include "ring.h" #endif
gemm.c
#include "gemm.h" #include "utils.h" #include "opencl.h" #include <stdlib.h> #include <stdio.h> #include <math.h> void gemm_bin(int M, int N, int K, float ALPHA, char *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ char A_PART = A[i*lda+k]; if(A_PART){ for(j = 0; j < N; ++j){ C[i*ldc+j] += B[k*ldb+j]; } } else { for(j = 0; j < N; ++j){ C[i*ldc+j] -= B[k*ldb+j]; } } } } } float *random_matrix(int rows, int cols) { int i; float *m = (float*)calloc(rows*cols, sizeof(float)); for(i = 0; i < rows*cols; ++i){ m[i] = (float)rand()/RAND_MAX; } return m; } void time_random_matrix(int TA, int TB, int m, int k, int n) { float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); int i; clock_t start = clock(), end; for(i = 0; i<10; ++i){ gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); } end = clock(); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC); free(a); free(b); free(c); } void gemm(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc); } void gemm_nn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; #pragma omp parallel for for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ register float A_PART = ALPHA*A[i*lda+k]; for(j = 0; j < N; ++j){ C[i*ldc+j] += A_PART*B[k*ldb+j]; } } } } void gemm_nt(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; #pragma omp parallel for for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ register float sum = 0; for(k = 0; k < K; ++k){ sum += ALPHA*A[i*lda+k]*B[j*ldb + k]; } C[i*ldc+j] += sum; } } } void gemm_tn(int M, int N, int K, float 
ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; #pragma omp parallel for for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ register float A_PART = ALPHA*A[k*lda+i]; for(j = 0; j < N; ++j){ C[i*ldc+j] += A_PART*B[k*ldb+j]; } } } } void gemm_tt(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; #pragma omp parallel for for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ register float sum = 0; for(k = 0; k < K; ++k){ sum += ALPHA*A[i+k*lda]*B[k+j*ldb]; } C[i*ldc+j] += sum; } } } void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc); int i, j; for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ C[i*ldc + j] *= BETA; } } if(!TA && !TB) gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); else if(TA && !TB) gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); else if(!TA && TB) gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); else gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); } #ifdef GPU #ifndef ARM #include "clBLAS.h" #endif void gemm_kernel_init(void) { #ifndef ARM cl_int clErr; clErr = clblasSetup(); if (clErr != CL_SUCCESS) { printf("gemm_kernel_init: Could not setup clBLAS. 
Errorcode: %d\n", clErr); } #endif } void gemm_kernel_release(void) { #ifndef ARM clblasTeardown(); #endif } cl_mem_ext random_matrix_gpu(int rows, int cols) { int i; float *m = (float*)calloc(rows*cols, sizeof(float)); for(i = 0; i < rows*cols; ++i){ m[i] = (float)rand()/RAND_MAX; } return opencl_make_array(m, rows*cols); } #if !defined(GPU_MULTI) && !defined(ARM) void gemm_offset_gpu( int TA, int TB, int M, int N, int K, float ALPHA, cl_mem_ext A_gpu, int offset_A, int lda, cl_mem_ext B_gpu, int offset_B, int ldb, float BETA, cl_mem_ext C_gpu, int offset_C, int ldc) { #ifdef BENCHMARK clock_t t; t = clock(); #endif cl_int clErr; cl_command_queue que = opencl_queues[opencl_device_id_t]; clErr = clblasSgemm(clblasRowMajor, (TA ? clblasTrans : clblasNoTrans), (TB ? clblasTrans : clblasNoTrans), M, N, K, ALPHA, A_gpu.mem, offset_A, lda, B_gpu.mem, offset_B, ldb, BETA, C_gpu.mem, offset_C, ldc, 1, &que, 0, NULL, NULL); // clFlush(que); #ifdef BENCHMARK t = clock() - t; double time_taken = ((double)t); printf("%s\t%d\n", "clblasSgemm", (int)time_taken); #endif if (clErr != CL_SUCCESS) { printf("gemm_gpu: clblasSgemm failed. 
Errorcode: %d\n", clErr); } } #endif void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA, cl_mem_ext A_gpu, int lda, cl_mem_ext B_gpu, int ldb, float BETA, cl_mem_ext C_gpu, int ldc) { #ifdef BENCHMARK clock_t t; t = clock(); #endif gemm_offset_gpu(TA, TB, M, N, K, ALPHA, A_gpu, 0, lda, B_gpu, 0, ldb, BETA, C_gpu, 0, ldc); #ifdef BENCHMARK t = clock() - t; double time_taken = ((double)t); printf("%s\t%d\n", "gemm_offset_gpu", (int)time_taken); #endif } #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> void time_gpu_random_matrix(int TA, int TB, int m, int k, int n) { cl_mem_ext a; if(!TA) a = random_matrix_gpu(m,k); else a = random_matrix_gpu(k,m); int lda = (!TA)?k:m; cl_mem_ext b; if(!TB) b = random_matrix_gpu(k,n); else b = random_matrix_gpu(n,k); int ldb = (!TB)?n:k; cl_mem_ext c = random_matrix_gpu(m,n); int i; clock_t start = clock(), end; for(i = 0; i<32; ++i){ gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); } end = clock(); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC); opencl_free(a); opencl_free(b); opencl_free(c); } void time_gpu(int TA, int TB, int m, int k, int n) { int iter = 10; float *a = random_matrix(m,k); float *b = random_matrix(k,n); int lda = (!TA)?k:m; int ldb = (!TB)?n:k; float *c = random_matrix(m,n); cl_mem_ext a_cl = opencl_make_array(a, m*k); cl_mem_ext b_cl = opencl_make_array(b, k*n); cl_mem_ext c_cl = opencl_make_array(c, m*n); int i; clock_t start = clock(), end; for(i = 0; i<iter; ++i){ gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n); clFinish(opencl_queues[opencl_device_id_t]); } double flop = ((double)m)*n*(2.*k + 2.)*iter; double gflop = flop/pow(10., 9); end = clock(); double seconds = sec(end-start); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds); opencl_free(a_cl); opencl_free(b_cl); opencl_free(c_cl); free(a); free(b); free(c); } /* TODO: THINK 
ABOUT IT?! void test_gpu_accuracy(int TA, int TB, int m, int k, int n) { srand(0); cl_mem_ext a; if(!TA) a = random_matrix_gpu(m,k); else a = random_matrix_gpu(k,m); int lda = (!TA)?k:m; cl_mem_ext b; if(!TB) b = random_matrix_gpu(k,n); else b = random_matrix_gpu(n,k); int ldb = (!TB)?n:k; cl_mem_ext c = random_matrix_gpu(m,n); cl_mem_ext c_gpu = random_matrix_gpu(m,n); memset(c, 0, m*n*sizeof(float)); memset(c_gpu, 0, m*n*sizeof(float)); int i; //pm(m,k,b); gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n); //printf("GPU\n"); //pm(m, n, c_gpu); gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); //printf("\n\nCPU\n"); //pm(m, n, c); double sse = 0; for(i = 0; i < m*n; ++i) { //printf("%f %f\n", c[i], c_gpu[i]); sse += pow(c[i]-c_gpu[i], 2); } printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n)); opencl_free(a); opencl_free(b); opencl_free(c); opencl_free(c_gpu); } */ int test_gpu_blas() { /* test_gpu_accuracy(0,0,10,576,75); test_gpu_accuracy(0,0,17,10,10); test_gpu_accuracy(1,0,17,10,10); test_gpu_accuracy(0,1,17,10,10); test_gpu_accuracy(1,1,17,10,10); test_gpu_accuracy(0,0,1000,10,100); test_gpu_accuracy(1,0,1000,10,100); test_gpu_accuracy(0,1,1000,10,100); test_gpu_accuracy(1,1,1000,10,100); test_gpu_accuracy(0,0,10,10,10); time_gpu(0,0,64,2916,363); time_gpu(0,0,64,2916,363); time_gpu(0,0,64,2916,363); time_gpu(0,0,192,729,1600); time_gpu(0,0,384,196,1728); time_gpu(0,0,256,196,3456); time_gpu(0,0,256,196,2304); time_gpu(0,0,128,4096,12544); time_gpu(0,0,128,4096,4096); */ time_gpu(0,0,64,75,12544); time_gpu(0,0,64,75,12544); time_gpu(0,0,64,75,12544); time_gpu(0,0,64,576,12544); time_gpu(0,0,256,2304,784); time_gpu(1,1,2304,256,784); time_gpu(0,0,512,4608,196); time_gpu(1,1,4608,512,196); return 0; } #endif
GB_unop__abs_fp64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__abs_fp64_fc64)
// op(A') function: GB (_unop_tran__abs_fp64_fc64)

// C type: double
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = (aij)
// unaryop: cij = cabs (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: complex magnitude (double complex -> double)
#define GB_OP(z, x) \
    z = cabs (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = (aij) ; \
    Cx [pC] = cabs (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise Cx [p] = cabs (Ax [p]) over anz entries, parallelized with a
// static OpenMP schedule over nthreads threads.
GrB_Info GB (_unop_apply__abs_fp64_fc64)
(
    double *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every position p holds a valid entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = (aij) ;
            Cx [p] = cabs (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = (aij) ;
            Cx [p] = cabs (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is textually included from the shared template
// GB_unop_transpose.c, specialized by the macros defined above.
GrB_Info GB (_unop_tran__abs_fp64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ex_particle_OPENMP_seq.cuda.c
#include <stdio.h>

// Returns a globally unique worker id for the current CUDA thread.
__device__ inline int hclib_get_current_worker() {
    return blockIdx.x * blockDim.x + threadIdx.x;
}

// Thin kernel wrapper: runs one functor invocation per iteration index.
template<class functor_type>
__global__ void wrapper_kernel(unsigned iter_offset, unsigned niters, functor_type functor) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < niters) {
        functor(iter_offset + tid);
    }
}

// Launches `functor` over `niters` iterations on the device, timing the
// kernel (stderr) and handling host<->device transfers via the functor.
template<class functor_type>
static void kernel_launcher(const char *kernel_lbl, unsigned iter_offset, unsigned niters, functor_type functor) {
    const int threads_per_block = 256;
    const int nblocks = (niters + threads_per_block - 1) / threads_per_block;

    functor.transfer_to_device();

    const unsigned long long start = capp_current_time_ns();
    wrapper_kernel<<<nblocks, threads_per_block>>>(iter_offset, niters, functor);
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA Error while synchronizing kernel - %s\n", cudaGetErrorString(err));
        exit(2);
    }
    const unsigned long long end = capp_current_time_ns();
    fprintf(stderr, "%s %llu ns\n", kernel_lbl, end - start);

    functor.transfer_from_device();
}

#ifdef __cplusplus
#ifdef __CUDACC__
#endif
#endif

/**
 * @file ex_particle_OPENMP_seq.c
 * @author Michael Trotter & Matt Goodrum
 * @brief Particle filter implementation in C/OpenMP
 */
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
#include <limits.h>
#include <time.h>
#include <string.h>

#define PI 3.1415926535897932

/** @var M value for Linear Congruential Generator (LCG); use GCC's value */
long M = INT_MAX;
/** @var A value for LCG */
int A = 1103515245;
/** @var C value for LCG */
int C = 12345;

/*****************************
 * GET_TIME
 * returns a long int representing the time
 *****************************/
long long get_time() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (tv.tv_sec * 1000000) + tv.tv_usec;
}

// Returns the number of seconds elapsed between the two specified times
float elapsed_time(long long start_time, long long end_time) {
    return (float) (end_time - start_time) / (1000 * 1000);
}

/**
 * Takes in a double and returns an integer that approximates to that double,
 * rounding half away from the truncated value.
 * @return the truncated value if the fractional part is < .5, otherwise the
 *         truncated value plus one
 */
double roundDouble(double value){
    int newValue = (int)(value);
    if(value - newValue < .5)
        return newValue;
    else
        // BUGFIX: was `return newValue++;` — a post-increment whose result is
        // the *old* value, so the function never actually rounded up.
        return newValue + 1;
}

/**
 * Set values of the 3D array to a newValue if that value is equal to the testValue
 * @param testValue The value to be replaced
 * @param newValue The value to replace testValue with
 * @param array3D The image vector
 * @param dimX The x dimension of the frame
 * @param dimY The y dimension of the frame
 * @param dimZ The number of frames
 */
void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
    int x, y, z;
    for(x = 0; x < *dimX; x++){
        for(y = 0; y < *dimY; y++){
            for(z = 0; z < *dimZ; z++){
                if(array3D[x * *dimY * *dimZ + y * *dimZ + z] == testValue)
                    array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
            }
        }
    }
}

/**
 * Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
 * @see http://en.wikipedia.org/wiki/Linear_congruential_generator
 * @note This function is thread-safe as long as each thread uses its own index
 * @param seed The seed array
 * @param index The specific index of the seed to be advanced
 * @return a uniformly distributed number [0, 1)
 */
double randu(int * seed, int index)
{
    // BUGFIX: the LCG step was computed as `int num = A*seed[index] + C;`,
    // which overflows a signed int — undefined behavior (CERT INT32-C).
    // Do the multiply-add in 64-bit, then reduce mod M as intended.
    long long num = (long long) A * seed[index] + C;
    seed[index] = (int)(num % M);
    return fabs(seed[index] / ((double) M));
}

/**
 * Generates a normally distributed random number using the Box-Muller transformation
 * @note This function is thread-safe as long as each thread uses its own index
 * @param seed The seed array
 * @param index The specific index of the seed to be advanced
 * @return a double representing random number generated using the Box-Muller algorithm
 * @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
 */
double randn(int * seed, int index){
    /*Box-Muller algorithm*/
    double u = randu(seed, index);
    double v = randu(seed, index);
    double cosine = cos(2*PI*v);
    double rt = -2*log(u);
    return sqrt(rt)*cosine;
}

/**
 * Sets values of 3D matrix using randomly generated numbers from a normal distribution
 * @param array3D The video to be modified
 * @param dimX The x dimension of the frame
 * @param dimY The y dimension of the frame
 * @param dimZ The number of frames
 * @param seed The seed array
 */
void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){
    int x, y, z;
    for(x = 0; x < *dimX; x++){
        for(y = 0; y < *dimY; y++){
            for(z = 0; z < *dimZ; z++){
                // zero-mean Gaussian noise with std-dev 5, truncated to int
                array3D[x * *dimY * *dimZ + y * *dimZ + z] =
                    array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0));
            }
        }
    }
}

/**
 * Fills a (2*radius-1) x (2*radius-1) matrix representing the disk
 * @param disk The pointer to the disk to be made
 * @param radius The radius of the disk to be made
 */
void strelDisk(int * disk, int radius)
{
    int diameter = radius*2 - 1;
    int x, y;
    for(x = 0; x < diameter; x++){
        for(y = 0; y < diameter; y++){
            double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
            if(distance < radius)
                disk[x*diameter + y] = 1;
        }
    }
}

/**
 * Dilates the provided video at a single pixel position
 * @param matrix The video to be dilated
 * @param posX The x location of the pixel to be dilated
 * @param posY The y location of the pixel to be dilated
 * @param posZ The z location of the pixel to be dilated
 * @param dimX The x dimension of the frame
 * @param dimY The y dimension of the frame
 * @param dimZ The number of frames
 * @param error The error radius
 */
void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
    // clamp the dilation window to the frame bounds
    int startX = posX - error;
    while(startX < 0)
        startX++;
    int startY = posY - error;
    while(startY < 0)
        startY++;
    int endX = posX + error;
    while(endX > dimX)
        endX--;
    int endY = posY + error;
    while(endY > dimY)
        endY--;
    int x,y;
    for(x = startX; x < endX; x++){
        for(y = startY; y < endY; y++){
            double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
            if(distance < error)
                matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
        }
    }
}

/**
 * Dilates the target matrix using the radius as a guide
 * @param matrix The reference matrix
 * @param dimX The x dimension of the video
 * @param dimY The y dimension of the video
 * @param dimZ The z dimension of the video
 * @param error The error radius to be dilated
 * @param newMatrix The target matrix (only 1s are written; caller must zero it)
 */
void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
    int x, y, z;
    for(z = 0; z < dimZ; z++){
        for(x = 0; x < dimX; x++){
            for(y = 0; y < dimY; y++){
                if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){
                    dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
                }
            }
        }
    }
}

/**
 * Fills a 2D array describing the offsets of the disk object
 * @param se The disk object
 * @param numOnes The number of ones in the disk
 * @param neighbors The array that will contain the offsets
 * @param radius The radius used for dilation
 */
void getneighbors(int * se, int numOnes, double * neighbors, int radius){
    int x, y;
    int neighY = 0;
    int center = radius - 1;
    int diameter = radius*2 -1;
    for(x = 0; x < diameter; x++){
        for(y = 0; y < diameter; y++){
            if(se[x*diameter + y]){
                neighbors[neighY*2] = (int)(y - center);
                neighbors[neighY*2 + 1] = (int)(x - center);
                neighY++;
            }
        }
    }
}

/**
 * The synthetic video sequence we will work with here is composed of a
 * single moving object, circular in shape (fixed radius)
 * The motion here is a linear motion
 * the foreground intensity and the background intensity is known
 * the image is corrupted with zero mean Gaussian noise
 * @param I The video itself
 * @param IszX The x dimension of the video
 * @param IszY The y dimension of the video
 * @param Nfr The number of frames of the video
 * @param seed The seed array used for number generation
 */
void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){
    int k;
    int max_size = IszX*IszY*Nfr;
    /*get object centers*/
    int x0 = (int)roundDouble(IszY/2.0);
    int y0 = (int)roundDouble(IszX/2.0);
    I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;

    /*move point*/
    int xk, yk, pos;
    for(k = 1; k < Nfr; k++){
        xk = abs(x0 + (k-1));
        yk = abs(y0 - 2*(k-1));
        // NOTE(review): indexing here is yk*IszY*Nfr + xk*Nfr + k, while the
        // seed pixel above uses x0*IszY*Nfr + y0*Nfr — kept as in the
        // original benchmark; confirm intended axis order before changing.
        pos = yk * IszY * Nfr + xk *Nfr + k;
        if(pos >= max_size)
            pos = 0;
        I[pos] = 1;
    }

    /*dilate matrix*/
    // BUGFIX: was malloc() — imdilate_disk only writes 1s into newMatrix, so
    // every untouched element was uninitialized garbage that got copied back
    // into I. calloc() zero-initializes the scratch buffer.
    int * newMatrix = (int *)calloc(IszX*IszY*Nfr, sizeof(int));
    imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
    int x, y;
    for(x = 0; x < IszX; x++){
        for(y = 0; y < IszY; y++){
            for(k = 0; k < Nfr; k++){
                I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k];
            }
        }
    }
    free(newMatrix);

    /*define background, add noise*/
    setIf(0, 100, I, &IszX, &IszY, &Nfr);
    setIf(1, 228, I, &IszX, &IszY, &Nfr);
    /*add noise*/
    addNoise(I, &IszX, &IszY, &Nfr, seed);
}

/**
 * Determines the likelihood sum based on the formula:
 * SUM( ((IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50 )
 * (the code divides by 50, not 100 as the original comment claimed)
 * @param I The 3D matrix
 * @param ind The current ind array
 * @param numOnes The length of ind array
 * @return A double representing the sum
 */
double calcLikelihoodSum(int * I, int * ind, int numOnes){
    double likelihoodSum = 0.0;
    int y;
    for(y = 0; y < numOnes; y++)
        likelihoodSum += (pow((I[ind[y]] - 100),2) - pow((I[ind[y]]-228),2))/50.0;
    return likelihoodSum;
}

/**
 * Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
 * @note This function uses sequential search
 * @param CDF The CDF
 * @param lengthCDF The length of CDF
 * @param value The value to be found
 * @return The index of value in the CDF; if value is never found, returns the last index
 */
int findIndex(double * CDF, int lengthCDF, double value){
    int index = -1;
    int x;
    for(x = 0; x < lengthCDF; x++){
        if(CDF[x] >= value){
            index = x;
            break;
        }
    }
    if(index == -1){
        return lengthCDF-1;
    }
    return index;
}

/**
 * Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
 * @note This function uses binary search before switching to sequential search
 * @param CDF The
CDF * @param beginIndex The index to start searching from * @param endIndex The index to stop searching * @param value The value to find * @return The index of value in the CDF; if value is never found, returns the last index * @warning Use at your own risk; not fully tested */ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value){ if(endIndex < beginIndex) return -1; int middleIndex = beginIndex + ((endIndex - beginIndex)/2); /*check the value*/ if(CDF[middleIndex] >= value) { /*check that it's good*/ if(middleIndex == 0) return middleIndex; else if(CDF[middleIndex-1] < value) return middleIndex; else if(CDF[middleIndex-1] == value) { while(middleIndex > 0 && CDF[middleIndex-1] == value) middleIndex--; return middleIndex; } } if(CDF[middleIndex] > value) return findIndexBin(CDF, beginIndex, middleIndex+1, value); return findIndexBin(CDF, middleIndex-1, endIndex, value); } /** * The implementation of the particle filter using OpenMP for many frames * @see http://openmp.org/wp/ * @note This function is designed to work with a video of several frames. 
In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods * @param I The video to be run * @param IszX The x dimension of the video * @param IszY The y dimension of the video * @param Nfr The number of frames * @param seed The seed array used for random number generation * @param Nparticles The number of particles to be used */ class pragma373_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; double* volatile weights; double* volatile h_weights; int x; volatile int Nparticles; public: pragma373_omp_parallel_hclib_async(double* set_weights, int set_x, int set_Nparticles) { h_weights = set_weights; x = set_x; Nparticles = set_Nparticles; } void transfer_to_device() { int i; cudaError_t err; weights = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 1, h_weights); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (weights == NULL && (char *)h_weights >= (char *)host_allocations[i] && ((char *)h_weights - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_weights - (char *)host_allocations[i]); memcpy((void *)(&weights), (void *)(&tmp), sizeof(void *)); } } assert(weights || h_weights == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < 
nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int x) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { weights[x] = 1/((double)(Nparticles)); } } } }; class pragma388_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; double* volatile arrayX; double* volatile h_arrayX; int x; volatile double xe; double* volatile arrayY; double* volatile h_arrayY; volatile double ye; public: pragma388_omp_parallel_hclib_async(double* set_arrayX, int set_x, double set_xe, double* set_arrayY, double set_ye) { h_arrayX = set_arrayX; x = set_x; xe = set_xe; h_arrayY = set_arrayY; ye = set_ye; } void transfer_to_device() { int i; cudaError_t err; arrayX = NULL; arrayY = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 2, h_arrayX, h_arrayY); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (arrayX == NULL && (char *)h_arrayX >= (char *)host_allocations[i] && ((char *)h_arrayX - (char *)host_allocations[i]) < 
host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayX - (char *)host_allocations[i]); memcpy((void *)(&arrayX), (void *)(&tmp), sizeof(void *)); } if (arrayY == NULL && (char *)h_arrayY >= (char *)host_allocations[i] && ((char *)h_arrayY - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayY - (char *)host_allocations[i]); memcpy((void *)(&arrayY), (void *)(&tmp), sizeof(void *)); } } assert(arrayX || h_arrayX == NULL); assert(arrayY || h_arrayY == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int x) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { arrayX[x] = xe; arrayY[x] = ye; } } } }; class pragma402_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; __device__ double randn(int * seed, int index) { { /*Box-Muller algorithm*/ double u = randu(seed, index); double v = randu(seed, index); double cosine = cos(2*PI*v); double rt = -2*log(u); return sqrt(rt)*cosine; } } __device__ double randu(int * seed, int index) { { int num = A*seed[index] + C; seed[index] = num % M; return fabs(seed[index]/((double) M)); } } double* volatile arrayX; double* volatile h_arrayX; int x; volatile int A; volatile int C; volatile long M; int* volatile seed; int* volatile h_seed; double* volatile arrayY; double* volatile h_arrayY; public: 
pragma402_omp_parallel_hclib_async(double* set_arrayX, int set_x, int set_A, int set_C, long set_M, int* set_seed, double* set_arrayY) { h_arrayX = set_arrayX; x = set_x; A = set_A; C = set_C; M = set_M; h_seed = set_seed; h_arrayY = set_arrayY; } void transfer_to_device() { int i; cudaError_t err; arrayX = NULL; seed = NULL; arrayY = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 3, h_arrayX, h_seed, h_arrayY); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (arrayX == NULL && (char *)h_arrayX >= (char *)host_allocations[i] && ((char *)h_arrayX - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayX - (char *)host_allocations[i]); memcpy((void *)(&arrayX), (void *)(&tmp), sizeof(void *)); } if (seed == NULL && (char *)h_seed >= (char *)host_allocations[i] && ((char *)h_seed - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_seed - (char *)host_allocations[i]); memcpy((void *)(&seed), (void *)(&tmp), sizeof(void *)); } if (arrayY == NULL && (char *)h_arrayY >= (char *)host_allocations[i] && ((char *)h_arrayY - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayY - (char *)host_allocations[i]); memcpy((void *)(&arrayY), (void *)(&tmp), sizeof(void *)); } } assert(arrayX || h_arrayX == NULL); assert(seed || h_seed 
== NULL); assert(arrayY || h_arrayY == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int x) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { arrayX[x] += 1 + 5*randn(seed, x); arrayY[x] += -2 + 2*randn(seed, x); } } } }; class pragma410_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; __device__ double roundDouble(double value) { { int newValue = (int)(value); if(value - newValue < .5) return newValue; else return newValue++; } } int y; volatile int countOnes; int indX; double* volatile arrayX; double* volatile h_arrayX; int x; double* volatile objxy; double* volatile h_objxy; int indY; double* volatile arrayY; double* volatile h_arrayY; int* volatile ind; int* volatile h_ind; volatile int IszY; volatile int Nfr; volatile int k; volatile int max_size; double* volatile likelihood; double* volatile h_likelihood; int* volatile I; int* volatile h_I; public: pragma410_omp_parallel_hclib_async(int set_y, int set_countOnes, int set_indX, double* set_arrayX, int set_x, double* set_objxy, int set_indY, double* set_arrayY, int* set_ind, int set_IszY, int set_Nfr, int set_k, int set_max_size, double* set_likelihood, int* set_I) { y = set_y; countOnes = set_countOnes; indX = set_indX; h_arrayX = set_arrayX; x = set_x; h_objxy = set_objxy; indY = set_indY; h_arrayY = set_arrayY; h_ind = set_ind; IszY = set_IszY; Nfr = set_Nfr; k = set_k; max_size = set_max_size; 
h_likelihood = set_likelihood; h_I = set_I; } void transfer_to_device() { int i; cudaError_t err; arrayX = NULL; objxy = NULL; arrayY = NULL; ind = NULL; likelihood = NULL; I = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 6, h_arrayX, h_objxy, h_arrayY, h_ind, h_likelihood, h_I); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (arrayX == NULL && (char *)h_arrayX >= (char *)host_allocations[i] && ((char *)h_arrayX - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayX - (char *)host_allocations[i]); memcpy((void *)(&arrayX), (void *)(&tmp), sizeof(void *)); } if (objxy == NULL && (char *)h_objxy >= (char *)host_allocations[i] && ((char *)h_objxy - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_objxy - (char *)host_allocations[i]); memcpy((void *)(&objxy), (void *)(&tmp), sizeof(void *)); } if (arrayY == NULL && (char *)h_arrayY >= (char *)host_allocations[i] && ((char *)h_arrayY - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayY - (char *)host_allocations[i]); memcpy((void *)(&arrayY), (void *)(&tmp), sizeof(void *)); } if (ind == NULL && (char *)h_ind >= (char *)host_allocations[i] && ((char *)h_ind - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + 
((char *)h_ind - (char *)host_allocations[i]); memcpy((void *)(&ind), (void *)(&tmp), sizeof(void *)); } if (likelihood == NULL && (char *)h_likelihood >= (char *)host_allocations[i] && ((char *)h_likelihood - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_likelihood - (char *)host_allocations[i]); memcpy((void *)(&likelihood), (void *)(&tmp), sizeof(void *)); } if (I == NULL && (char *)h_I >= (char *)host_allocations[i] && ((char *)h_I - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_I - (char *)host_allocations[i]); memcpy((void *)(&I), (void *)(&tmp), sizeof(void *)); } } assert(arrayX || h_arrayX == NULL); assert(objxy || h_objxy == NULL); assert(arrayY || h_arrayY == NULL); assert(ind || h_ind == NULL); assert(likelihood || h_likelihood == NULL); assert(I || h_I == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int x) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { //compute the likelihood: remember our assumption is that you know // foreground and the background image intensity distribution. // Notice that we consider here a likelihood ratio, instead of // p(z|x). It is possible in this case. why? a hometask for you. 
//calc ind for(y = 0; y < countOnes; y++){ indX = roundDouble(arrayX[x]) + objxy[y*2 + 1]; indY = roundDouble(arrayY[x]) + objxy[y*2]; ind[x*countOnes + y] = fabs((double)(indX*IszY*Nfr + indY*Nfr + k)); if(ind[x*countOnes + y] >= max_size) ind[x*countOnes + y] = 0; } likelihood[x] = 0; for(y = 0; y < countOnes; y++) likelihood[x] += (pow((I[ind[x*countOnes + y]] - 100),2) - pow((I[ind[x*countOnes + y]]-228),2))/50.0; likelihood[x] = likelihood[x]/((double) countOnes); } } } }; class pragma433_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; double* volatile weights; double* volatile h_weights; int x; double* volatile likelihood; double* volatile h_likelihood; public: pragma433_omp_parallel_hclib_async(double* set_weights, int set_x, double* set_likelihood) { h_weights = set_weights; x = set_x; h_likelihood = set_likelihood; } void transfer_to_device() { int i; cudaError_t err; weights = NULL; likelihood = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 2, h_weights, h_likelihood); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (weights == NULL && (char *)h_weights >= (char *)host_allocations[i] && ((char *)h_weights - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_weights - (char *)host_allocations[i]); memcpy((void *)(&weights), (void *)(&tmp), 
sizeof(void *)); } if (likelihood == NULL && (char *)h_likelihood >= (char *)host_allocations[i] && ((char *)h_likelihood - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_likelihood - (char *)host_allocations[i]); memcpy((void *)(&likelihood), (void *)(&tmp), sizeof(void *)); } } assert(weights || h_weights == NULL); assert(likelihood || h_likelihood == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int x) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { weights[x] = weights[x] * exp(likelihood[x]); } } } }; class pragma440_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; double sumWeights; double* volatile weights; double* volatile h_weights; int x; public: pragma440_omp_parallel_hclib_async(double set_sumWeights, double* set_weights, int set_x) { sumWeights = set_sumWeights; h_weights = set_weights; x = set_x; } void transfer_to_device() { int i; cudaError_t err; weights = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 1, h_weights); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = 
cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (weights == NULL && (char *)h_weights >= (char *)host_allocations[i] && ((char *)h_weights - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_weights - (char *)host_allocations[i]); memcpy((void *)(&weights), (void *)(&tmp), sizeof(void *)); } } assert(weights || h_weights == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int x) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { sumWeights += weights[x]; } } } }; class pragma446_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; double* volatile weights; double* volatile h_weights; int x; volatile double sumWeights; public: pragma446_omp_parallel_hclib_async(double* set_weights, int set_x, double set_sumWeights) { h_weights = set_weights; x = set_x; sumWeights = set_sumWeights; } void transfer_to_device() { int i; cudaError_t err; weights = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 1, h_weights); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], 
host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (weights == NULL && (char *)h_weights >= (char *)host_allocations[i] && ((char *)h_weights - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_weights - (char *)host_allocations[i]); memcpy((void *)(&weights), (void *)(&tmp), sizeof(void *)); } } assert(weights || h_weights == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int x) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { weights[x] = weights[x]/sumWeights; } } } }; class pragma455_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; double xe; double* volatile arrayX; double* volatile h_arrayX; int x; double* volatile weights; double* volatile h_weights; double ye; double* volatile arrayY; double* volatile h_arrayY; public: pragma455_omp_parallel_hclib_async(double set_xe, double* set_arrayX, int set_x, double* set_weights, double set_ye, double* set_arrayY) { xe = set_xe; h_arrayX = set_arrayX; x = set_x; h_weights = set_weights; ye = 
set_ye; h_arrayY = set_arrayY; } void transfer_to_device() { int i; cudaError_t err; arrayX = NULL; weights = NULL; arrayY = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 3, h_arrayX, h_weights, h_arrayY); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (arrayX == NULL && (char *)h_arrayX >= (char *)host_allocations[i] && ((char *)h_arrayX - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayX - (char *)host_allocations[i]); memcpy((void *)(&arrayX), (void *)(&tmp), sizeof(void *)); } if (weights == NULL && (char *)h_weights >= (char *)host_allocations[i] && ((char *)h_weights - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_weights - (char *)host_allocations[i]); memcpy((void *)(&weights), (void *)(&tmp), sizeof(void *)); } if (arrayY == NULL && (char *)h_arrayY >= (char *)host_allocations[i] && ((char *)h_arrayY - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayY - (char *)host_allocations[i]); memcpy((void *)(&arrayY), (void *)(&tmp), sizeof(void *)); } } assert(arrayX || h_arrayX == NULL); assert(weights || h_weights == NULL); assert(arrayY || h_arrayY == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], 
(void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int x) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { xe += arrayX[x] * weights[x]; ye += arrayY[x] * weights[x]; } } } }; class pragma480_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; double* volatile u; double* volatile h_u; int x; volatile double u1; volatile int Nparticles; public: pragma480_omp_parallel_hclib_async(double* set_u, int set_x, double set_u1, int set_Nparticles) { h_u = set_u; x = set_x; u1 = set_u1; Nparticles = set_Nparticles; } void transfer_to_device() { int i; cudaError_t err; u = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 1, h_u); device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (u == NULL && (char *)h_u >= (char *)host_allocations[i] && ((char *)h_u - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_u - (char *)host_allocations[i]); memcpy((void *)(&u), (void *)(&tmp), sizeof(void *)); } } assert(u || h_u 
== NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int x) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { u[x] = u1 + x/((double)(Nparticles)); } } } }; class pragma488_omp_parallel_hclib_async { private: void **host_allocations; size_t *host_allocation_sizes; unsigned nallocations; void **device_allocations; __device__ int findIndex(double * CDF, int lengthCDF, double value) { { int index = -1; int x; for(x = 0; x < lengthCDF; x++){ if(CDF[x] >= value){ index = x; break; } } if(index == -1){ return lengthCDF-1; } return index; } } int i; double* volatile CDF; double* volatile h_CDF; volatile int Nparticles; double* volatile u; double* volatile h_u; int j; double* volatile xj; double* volatile h_xj; double* volatile arrayX; double* volatile h_arrayX; double* volatile yj; double* volatile h_yj; double* volatile arrayY; double* volatile h_arrayY; public: pragma488_omp_parallel_hclib_async(int set_i, double* set_CDF, int set_Nparticles, double* set_u, int set_j, double* set_xj, double* set_arrayX, double* set_yj, double* set_arrayY) { i = set_i; h_CDF = set_CDF; Nparticles = set_Nparticles; h_u = set_u; j = set_j; h_xj = set_xj; h_arrayX = set_arrayX; h_yj = set_yj; h_arrayY = set_arrayY; } void transfer_to_device() { int i; cudaError_t err; CDF = NULL; u = NULL; xj = NULL; arrayX = NULL; yj = NULL; arrayY = NULL; get_underlying_allocations(&host_allocations, &host_allocation_sizes, &nallocations, 6, h_CDF, h_u, h_xj, h_arrayX, h_yj, h_arrayY); 
device_allocations = (void **)malloc(nallocations * sizeof(void *)); for (i = 0; i < nallocations; i++) { err = cudaMalloc((void **)&device_allocations[i], host_allocation_sizes[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaMemcpy((void *)device_allocations[i], (void *)host_allocations[i], host_allocation_sizes[i], cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } if (CDF == NULL && (char *)h_CDF >= (char *)host_allocations[i] && ((char *)h_CDF - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_CDF - (char *)host_allocations[i]); memcpy((void *)(&CDF), (void *)(&tmp), sizeof(void *)); } if (u == NULL && (char *)h_u >= (char *)host_allocations[i] && ((char *)h_u - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_u - (char *)host_allocations[i]); memcpy((void *)(&u), (void *)(&tmp), sizeof(void *)); } if (xj == NULL && (char *)h_xj >= (char *)host_allocations[i] && ((char *)h_xj - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_xj - (char *)host_allocations[i]); memcpy((void *)(&xj), (void *)(&tmp), sizeof(void *)); } if (arrayX == NULL && (char *)h_arrayX >= (char *)host_allocations[i] && ((char *)h_arrayX - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayX - (char *)host_allocations[i]); memcpy((void *)(&arrayX), (void *)(&tmp), sizeof(void *)); } if (yj == NULL && (char *)h_yj >= (char *)host_allocations[i] && ((char *)h_yj - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_yj - (char *)host_allocations[i]); memcpy((void *)(&yj), (void 
*)(&tmp), sizeof(void *)); } if (arrayY == NULL && (char *)h_arrayY >= (char *)host_allocations[i] && ((char *)h_arrayY - (char *)host_allocations[i]) < host_allocation_sizes[i]) { char *tmp = (char *)device_allocations[i] + ((char *)h_arrayY - (char *)host_allocations[i]); memcpy((void *)(&arrayY), (void *)(&tmp), sizeof(void *)); } } assert(CDF || h_CDF == NULL); assert(u || h_u == NULL); assert(xj || h_xj == NULL); assert(arrayX || h_arrayX == NULL); assert(yj || h_yj == NULL); assert(arrayY || h_arrayY == NULL); } void transfer_from_device() { cudaError_t err; int i; for (i = 0; i < nallocations; i++) { err = cudaMemcpy((void *)host_allocations[i], (void *)device_allocations[i], host_allocation_sizes[i], cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } err = cudaFree(device_allocations[i]); if (err != cudaSuccess) { fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); exit(3); } } } __device__ void operator()(int j) { for (int __dummy_iter = 0; __dummy_iter < 1; __dummy_iter++) { { i = findIndex(CDF, Nparticles, u[j]); if(i == -1) i = Nparticles-1; xj[j] = arrayX[i]; yj[j] = arrayY[i]; } } } }; void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){ int max_size = IszX*IszY*Nfr; long long start = get_time(); //original particle centroid double xe = roundDouble(IszY/2.0); double ye = roundDouble(IszX/2.0); //expected object locations, compared to center int radius = 5; int diameter = radius*2 - 1; int * disk = (int *)malloc(diameter*diameter*sizeof(int)); strelDisk(disk, radius); int countOnes = 0; int x, y; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ if(disk[x*diameter + y] == 1) countOnes++; } } double * objxy = (double *)malloc(countOnes*2*sizeof(double)); getneighbors(disk, countOnes, objxy, radius); long long get_neighbors = get_time(); printf("TIME TO GET NEIGHBORS 
TOOK: %f\n", elapsed_time(start, get_neighbors)); //initial weights are all equal (1/Nparticles) double * weights = (double *)malloc(sizeof(double)*Nparticles); { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma373_omp_parallel", iters_offset, niters, pragma373_omp_parallel_hclib_async(weights, x, Nparticles)); } long long get_weights = get_time(); printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights)); //initial likelihood to 0.0 double * likelihood = (double *)malloc(sizeof(double)*Nparticles); double * arrayX = (double *)malloc(sizeof(double)*Nparticles); double * arrayY = (double *)malloc(sizeof(double)*Nparticles); double * xj = (double *)malloc(sizeof(double)*Nparticles); double * yj = (double *)malloc(sizeof(double)*Nparticles); double * CDF = (double *)malloc(sizeof(double)*Nparticles); double * u = (double *)malloc(sizeof(double)*Nparticles); int * ind = (int*)malloc(sizeof(int)*countOnes*Nparticles); { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma388_omp_parallel", iters_offset, niters, pragma388_omp_parallel_hclib_async(arrayX, x, xe, arrayY, ye)); } int k; printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, get_time())); int indX, indY; for(k = 1; k < Nfr; k++){ long long set_arrays = get_time(); //apply motion model //draws sample from motion model (random walk). 
The only prior information //is that the object moves 2x as fast as in the y direction { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma402_omp_parallel", iters_offset, niters, pragma402_omp_parallel_hclib_async(arrayX, x, A, C, M, seed, arrayY)); } long long error = get_time(); printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error)); //particle filter likelihood { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma410_omp_parallel", iters_offset, niters, pragma410_omp_parallel_hclib_async(y, countOnes, indX, arrayX, x, objxy, indY, arrayY, ind, IszY, Nfr, k, max_size, likelihood, I)); } long long likelihood_time = get_time(); printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time)); // update & normalize weights // using equation (63) of Arulampalam Tutorial { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma433_omp_parallel", iters_offset, niters, pragma433_omp_parallel_hclib_async(weights, x, likelihood)); } long long exponential = get_time(); printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential)); double sumWeights = 0; { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma440_omp_parallel", iters_offset, niters, pragma440_omp_parallel_hclib_async(sumWeights, weights, x)); } long long sum_time = get_time(); printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time)); { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma446_omp_parallel", iters_offset, niters, pragma446_omp_parallel_hclib_async(weights, x, sumWeights)); } long long normalize = get_time(); printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize)); xe = 0; ye = 0; // estimate the object location by expected values { const int niters = (Nparticles) - (0); const int iters_offset = (0); 
kernel_launcher("pragma455_omp_parallel", iters_offset, niters, pragma455_omp_parallel_hclib_async(xe, arrayX, x, weights, ye, arrayY)); } long long move_time = get_time(); printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time)); printf("XE: %lf\n", xe); printf("YE: %lf\n", ye); double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) ); printf("%lf\n", distance); //display(hold off for now) //pause(hold off for now) //resampling CDF[0] = weights[0]; for(x = 1; x < Nparticles; x++){ CDF[x] = weights[x] + CDF[x-1]; } long long cum_sum = get_time(); printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum)); double u1 = (1/((double)(Nparticles)))*randu(seed, 0); { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma480_omp_parallel", iters_offset, niters, pragma480_omp_parallel_hclib_async(u, x, u1, Nparticles)); } long long u_time = get_time(); printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time)); int j, i; { const int niters = (Nparticles) - (0); const int iters_offset = (0); kernel_launcher("pragma488_omp_parallel", iters_offset, niters, pragma488_omp_parallel_hclib_async(i, CDF, Nparticles, u, j, xj, arrayX, yj, arrayY)); } long long xyj_time = get_time(); printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time)); //#pragma omp parallel for shared(weights, Nparticles) private(x) for(x = 0; x < Nparticles; x++){ //reassign arrayX and arrayY arrayX[x] = xj[x]; arrayY[x] = yj[x]; weights[x] = 1/((double)(Nparticles)); } long long reset = get_time(); printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset)); } free(disk); free(objxy); free(weights); free(likelihood); free(xj); free(yj); free(arrayX); free(arrayY); free(CDF); free(u); free(ind); } int main(int argc, char * argv[]){ char* usage = "openmp.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>"; //check number of 
arguments if(argc != 9) { printf("%s\n", usage); return 0; } //check args deliminators if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) { printf( "%s\n",usage ); return 0; } int IszX, IszY, Nfr, Nparticles; //converting a string to a integer if( sscanf( argv[2], "%d", &IszX ) == EOF ) { printf("ERROR: dimX input is incorrect"); return 0; } if( IszX <= 0 ) { printf("dimX must be > 0\n"); return 0; } //converting a string to a integer if( sscanf( argv[4], "%d", &IszY ) == EOF ) { printf("ERROR: dimY input is incorrect"); return 0; } if( IszY <= 0 ) { printf("dimY must be > 0\n"); return 0; } //converting a string to a integer if( sscanf( argv[6], "%d", &Nfr ) == EOF ) { printf("ERROR: Number of frames input is incorrect"); return 0; } if( Nfr <= 0 ) { printf("number of frames must be > 0\n"); return 0; } //converting a string to a integer if( sscanf( argv[8], "%d", &Nparticles ) == EOF ) { printf("ERROR: Number of particles input is incorrect"); return 0; } if( Nparticles <= 0 ) { printf("Number of particles must be > 0\n"); return 0; } //establish seed int * seed = (int *)malloc(sizeof(int)*Nparticles); int i; for(i = 0; i < Nparticles; i++) seed[i] = time(0)*i; //malloc matrix int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr); long long start = get_time(); //call video sequence videoSequence(I, IszX, IszY, Nfr, seed); long long endVideoSequence = get_time(); printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence)); //call particle filter particleFilter(I, IszX, IszY, Nfr, seed, Nparticles); long long endParticleFilter = get_time(); printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter)); printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter)); free(seed); free(I); return 0; }
targc-273739.c
#include <omp.h>
#include <stdio.h>

/*
 * OpenMP target-offload smoke test: allocate two device-resident arrays
 * and initialize them with a "target teams distribute parallel for" loop.
 *
 * Returns 0 on success, 1 if device allocation fails.
 */
int main(void)
{
    omp_set_default_device(0);

    size_t N = 1024 * 1024 * 10;   /* 10 Mi elements per array */
    int use_device = 1;            /* if(target:...) gate: offload when nonzero */
    int chunk = 1;                 /* static schedule chunk size */

    /* Allocate on the device the target region will actually run on.
     * The original code allocated on omp_get_initial_device() (the host)
     * and then used the pointers with is_device_ptr on device 0, which
     * dereferences host memory on the offload device. */
    int dev = omp_get_default_device();
    double *x_d = (double *) omp_target_alloc(N * sizeof(double), dev);
    double *y_d = (double *) omp_target_alloc(N * sizeof(double), dev);
    printf("x_d = %p\n", (void *) x_d);   /* %p requires a void* argument */
    printf("y_d = %p\n", (void *) y_d);
    if (x_d == NULL || y_d == NULL) {
        fprintf(stderr, "omp_target_alloc failed\n");
        omp_target_free(x_d, dev);   /* omp_target_free(NULL, ...) is a no-op */
        omp_target_free(y_d, dev);
        return 1;
    }

    #pragma omp target teams distribute parallel for \
            num_teams(120*4) thread_limit(512) schedule(static, chunk) \
            is_device_ptr(x_d, y_d) if(target: use_device)
    for (size_t i = 0; i < N; ++i) {
        x_d[i] = 0.0001 * i;
        y_d[i] = 0.00003 * i;
    }

    omp_target_free(x_d, dev);
    omp_target_free(y_d, dev);
    return 0;
}
GB_unaryop__lnot_uint8_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_uint8_fp32
// op(A') function: GB_tran__lnot_uint8_fp32

// C type: uint8_t
// A type: float
// cast: uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8)
// unaryop: cij = !(aij != 0)

// Type of the input array A
#define GB_ATYPE \
    float

// Type of the output array C
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// Cx [p]: the p-th entry of the output array
#define GB_CX(p) Cx [p]

// unary operator: logical NOT of (x != 0)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting: float -> uint8_t via the library's saturating unsigned cast
#define GB_CASTING(z, x) \
    uint8_t z ; GB_CAST_UNSIGNED(z,x,8) ;

// cij = op (cast (aij)): composed get/cast/apply step, expanded per entry
// both here and by the included transpose template below
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Sets Cx [p] = !(Ax [p] != 0) for p in [0, anz), in parallel.
// NOTE(review): comments assume Cx and Ax do not overlap — confirm callers.
GrB_Info GB_unop__lnot_uint8_fp32
(
    uint8_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body comes from the shared template GB_unaryop_transpose.c,
// specialized through the GB_* macros defined above.
GrB_Info GB_tran__lnot_uint8_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__bnot_int16_int16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__bnot_int16_int16)
// op(A') function: GB (_unop_tran__bnot_int16_int16)

// C type: int16_t
// A type: int16_t
// cast: int16_t cij = aij
// unaryop: cij = ~(aij)

// Type of the input array A
#define GB_ATYPE \
    int16_t

// Type of the output array C
#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// Cx [p]: the p-th entry of the output array
#define GB_CX(p) Cx [p]

// unary operator: bitwise complement
#define GB_OP(z, x) \
    z = ~(x) ;

// casting: int16_t -> int16_t (identity cast for this specialization)
#define GB_CAST(z, aij) \
    int16_t z = aij ;

// cij = op (aij): composed get/cast/apply step, also expanded by the
// included transpose template below
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int16_t z = aij ; \
    Cx [pC] = ~(z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BNOT || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Sets Cx [p] = ~Ax [p] for p in [0, anz), in parallel.  When Ab is non-NULL
// (bitmap case) entries with Ab [p] == 0 are skipped.
GrB_Info GB (_unop_apply__bnot_int16_int16)
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense/sparse/full case: every position in Ax holds a live entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op without typecast: a plain parallel memcpy suffices
        // (dead branch here, since the macro above is 0 for BNOT)
        GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            int16_t z = aij ;
            Cx [p] = ~(z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int16_t aij = Ax [p] ;
            int16_t z = aij ;
            Cx [p] = ~(z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body comes from the shared template GB_unop_transpose.c,
// specialized through the GB_* macros defined above.
GrB_Info GB (_unop_tran__bnot_int16_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_emult_08_template.c
//------------------------------------------------------------------------------ // GB_emult_08_template: C=A.*B, C<M or !M>=A.*B when C is sparse/hyper //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Computes C=A.*B, C<M>=A.*B, or C<!M>=A.*B when C is sparse or hypersparse: // phase1: does not compute C itself, but just counts the # of entries in each // vector of C. Fine tasks compute the # of entries in their slice of a // single vector of C, and the results are cumsum'd. // phase2: computes C, using the counts computed by phase1. // No input matrix can be jumbled, and C is constructed as unjumbled. // The following cases are handled: // ------------------------------------------ // C = A .* B // ------------------------------------------ // sparse . sparse sparse (method: 8) // ------------------------------------------ // C <M>= A .* B // ------------------------------------------ // sparse sparse sparse sparse (method: 8) // sparse bitmap sparse sparse (method: 8) // sparse full sparse sparse (method: 8) // sparse sparse sparse bitmap (9 or 2) // sparse sparse sparse full (9 or 2) // sparse sparse bitmap sparse (10 or 3) // sparse sparse full sparse (10 or 3) // ------------------------------------------ // C <!M>= A .* B // ------------------------------------------ // sparse sparse sparse sparse (8: M later) // sparse bitmap sparse sparse (method: 8) // sparse full sparse sparse (method: 8) // Methods 9 and 10 are not yet implemented, and are currently handled by this // Method 8 instead. those cases. Methods 2 and 3 can be used as well, but // only if M is applied later. See GB_emult_sparsity for this decision. 
{ int taskid ; #pragma omp parallel for num_threads(C_nthreads) schedule(dynamic,1) for (taskid = 0 ; taskid < C_ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- int64_t kfirst = TaskList [taskid].kfirst ; int64_t klast = TaskList [taskid].klast ; bool fine_task = (klast == -1) ; int64_t len ; if (fine_task) { // a fine task operates on a slice of a single vector klast = kfirst ; len = TaskList [taskid].len ; } else { // a coarse task operates on one or more whole vectors len = vlen ; } //---------------------------------------------------------------------- // compute all vectors in this task //---------------------------------------------------------------------- for (int64_t k = kfirst ; k <= klast ; k++) { //------------------------------------------------------------------ // get j, the kth vector of C //------------------------------------------------------------------ int64_t j = GBH (Ch, k) ; #if defined ( GB_PHASE_1_OF_2 ) int64_t cjnz = 0 ; #else int64_t pC, pC_end ; if (fine_task) { // A fine task computes a slice of C(:,j) pC = TaskList [taskid ].pC ; pC_end = TaskList [taskid+1].pC ; ASSERT (Cp [k] <= pC && pC <= pC_end && pC_end <= Cp [k+1]) ; } else { // The vectors of C are never sliced for a coarse task. pC = Cp [k] ; pC_end = Cp [k+1] ; } int64_t cjnz = pC_end - pC ; if (cjnz == 0) continue ; #endif //------------------------------------------------------------------ // get A(:,j) //------------------------------------------------------------------ int64_t pA = -1, pA_end = -1 ; if (fine_task) { // A fine task operates on Ai,Ax [pA...pA_end-1], which is // a subset of the vector A(:,j) pA = TaskList [taskid].pA ; pA_end = TaskList [taskid].pA_end ; } else { // A coarse task operates on the entire vector A (:,j) int64_t kA = (Ch == Ah) ? k : ((C_to_A == NULL) ? 
j : C_to_A [k]) ; if (kA >= 0) { pA = GBP (Ap, kA, vlen) ; pA_end = GBP (Ap, kA+1, vlen) ; } } int64_t ajnz = pA_end - pA ; // nnz in A(:,j) for this slice int64_t pA_start = pA ; bool adense = (ajnz == len) ; // get the first and last indices in A(:,j) for this vector int64_t iA_first = -1 ; if (ajnz > 0) { iA_first = GBI (Ai, pA, vlen) ; } #if defined ( GB_PHASE_1_OF_2 ) || defined ( GB_DEBUG ) int64_t iA_last = -1 ; if (ajnz > 0) { iA_last = GBI (Ai, pA_end-1, vlen) ; } #endif //------------------------------------------------------------------ // get B(:,j) //------------------------------------------------------------------ int64_t pB = -1, pB_end = -1 ; if (fine_task) { // A fine task operates on Bi,Bx [pB...pB_end-1], which is // a subset of the vector B(:,j) pB = TaskList [taskid].pB ; pB_end = TaskList [taskid].pB_end ; } else { // A coarse task operates on the entire vector B (:,j) int64_t kB = (Ch == Bh) ? k : ((C_to_B == NULL) ? j : C_to_B [k]) ; if (kB >= 0) { pB = GBP (Bp, kB, vlen) ; pB_end = GBP (Bp, kB+1, vlen) ; } } int64_t bjnz = pB_end - pB ; // nnz in B(:,j) for this slice int64_t pB_start = pB ; bool bdense = (bjnz == len) ; // get the first and last indices in B(:,j) for this vector int64_t iB_first = -1 ; if (bjnz > 0) { iB_first = GBI (Bi, pB, vlen) ; } #if defined ( GB_PHASE_1_OF_2 ) || defined ( GB_DEBUG ) int64_t iB_last = -1 ; if (bjnz > 0) { iB_last = GBI (Bi, pB_end-1, vlen) ; } #endif //------------------------------------------------------------------ // get M(:,j) if M is sparse or hypersparse //------------------------------------------------------------------ int64_t pM = -1 ; int64_t pM_end = -1 ; if (M_is_sparse_or_hyper) { if (fine_task) { // A fine task operates on Mi,Mx [pM...pM_end-1], which is // a subset of the vector M(:,j) pM = TaskList [taskid].pM ; pM_end = TaskList [taskid].pM_end ; } else { int64_t kM = -1 ; if (Ch == Mh) { // Ch is the same as Mh (a shallow copy), or both NULL kM = k ; } else { kM = (C_to_M == 
NULL) ? j : C_to_M [k] ; } if (kM >= 0) { pM = GBP (Mp, kM, vlen) ; pM_end = GBP (Mp, kM+1, vlen) ; } } } //------------------------------------------------------------------ // C(:,j)<optional mask> = A (:,j) .* B (:,j) or subvector //------------------------------------------------------------------ #if defined ( GB_PHASE_1_OF_2 ) if (ajnz == 0 || bjnz == 0) { //-------------------------------------------------------------- // Method8(a): A(:,j) and/or B(:,j) are empty //-------------------------------------------------------------- ; } else if (iA_last < iB_first || iB_last < iA_first) { //-------------------------------------------------------------- // Method8(a): intersection of A(:,j) and B(:,j) is empty //-------------------------------------------------------------- // the last entry of A(:,j) comes before the first entry // of B(:,j), or visa versa ; } else #endif if (M == NULL) { //-------------------------------------------------------------- // Method8(b,c,d): C = A.*B, no mask //-------------------------------------------------------------- // ------------------------------------------ // C = A .* B // ------------------------------------------ // sparse . 
sparse sparse (method: 8) // sparse sparse sparse sparse (8, M later) // both A and B are sparse/hyper ASSERT (A_is_sparse || A_is_hyper) ; ASSERT (B_is_sparse || B_is_hyper) ; if (ajnz > 32 * bjnz) { //---------------------------------------------------------- // Method8(b): A(:,j) is much denser than B(:,j) //---------------------------------------------------------- for ( ; pB < pB_end ; pB++) { int64_t i = Bi [pB] ; // find i in A(:,j) int64_t pright = pA_end - 1 ; bool found ; GB_BINARY_SEARCH (i, Ai, pA, pright, found) ; if (found) { // C (i,j) = A (i,j) .* B (i,j) #if defined ( GB_PHASE_1_OF_2 ) cjnz++ ; #else ASSERT (pC < pC_end) ; Ci [pC] = i ; #ifndef GB_ISO_EMULT GB_GETA (aij, Ax, pA, A_iso) ; GB_GETB (bij, Bx, pB, B_iso) ; GB_BINOP (GB_CX (pC), aij, bij, i, j) ; #endif pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif } else if (bjnz > 32 * ajnz) { //---------------------------------------------------------- // Method8(c): B(:,j) is much denser than A(:,j) //---------------------------------------------------------- for ( ; pA < pA_end ; pA++) { int64_t i = Ai [pA] ; // find i in B(:,j) int64_t pright = pB_end - 1 ; bool found ; GB_BINARY_SEARCH (i, Bi, pB, pright, found) ; if (found) { // C (i,j) = A (i,j) .* B (i,j) #if defined ( GB_PHASE_1_OF_2 ) cjnz++ ; #else ASSERT (pC < pC_end) ; Ci [pC] = i ; #ifndef GB_ISO_EMULT GB_GETA (aij, Ax, pA, A_iso) ; GB_GETB (bij, Bx, pB, B_iso) ; GB_BINOP (GB_CX (pC), aij, bij, i, j) ; #endif pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif } else { //---------------------------------------------------------- // Method8(d): A(:,j) and B(:,j) about the sparsity //---------------------------------------------------------- // linear-time scan of A(:,j) and B(:,j) while (pA < pA_end && pB < pB_end) { int64_t iA = Ai [pA] ; int64_t iB = Bi [pB] ; if (iA < iB) { // A(i,j) exists but not B(i,j) pA++ ; } else if (iB < iA) { // B(i,j) exists but not A(i,j) pB++ ; } 
else { // both A(i,j) and B(i,j) exist // C (i,j) = A (i,j) .* B (i,j) #if defined ( GB_PHASE_1_OF_2 ) cjnz++ ; #else ASSERT (pC < pC_end) ; Ci [pC] = iB ; #ifndef GB_ISO_EMULT GB_GETA (aij, Ax, pA, A_iso) ; GB_GETB (bij, Bx, pB, B_iso) ; GB_BINOP (GB_CX (pC), aij, bij, iB, j) ; #endif pC++ ; #endif pA++ ; pB++ ; } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif } } else if (M_is_sparse_or_hyper) { //-------------------------------------------------------------- // Method8(e): C and M are sparse or hypersparse //-------------------------------------------------------------- // ------------------------------------------ // C <M>= A .* B // ------------------------------------------ // sparse sparse sparse sparse (method: 8) // sparse sparse sparse bitmap (9 or 2) // sparse sparse sparse full (9 or 2) // sparse sparse bitmap sparse (10 or 3) // sparse sparse full sparse (10 or 3) // Methods 9 and 10 are not yet implemented; using Method 8 // (GB_emult_08) instead. // ether A or B are sparse/hyper ASSERT (A_is_sparse || A_is_hyper || B_is_sparse || B_is_hyper); for ( ; pM < pM_end ; pM++) { //---------------------------------------------------------- // get M(i,j) for A(i,j) .* B (i,j) //---------------------------------------------------------- int64_t i = GBI (Mi, pM, vlen) ; bool mij = GB_mcast (Mx, pM, msize) ; if (!mij) continue ; //---------------------------------------------------------- // get A(i,j) //---------------------------------------------------------- bool afound ; if (adense) { // A(:,j) is dense, bitmap, or full; use quick lookup pA = pA_start + i - iA_first ; afound = GBB (Ab, pA) ; } else { // A(:,j) is sparse; use binary search for A(i,j) int64_t apright = pA_end - 1 ; GB_BINARY_SEARCH (i, Ai, pA, apright, afound) ; } if (!afound) continue ; ASSERT (GBI (Ai, pA, vlen) == i) ; //---------------------------------------------------------- // get B(i,j) //---------------------------------------------------------- bool bfound ; if 
(bdense) { // B(:,j) is dense; use direct lookup for B(i,j) pB = pB_start + i - iB_first ; bfound = GBB (Bb, pB) ; } else { // B(:,j) is sparse; use binary search for B(i,j) int64_t bpright = pB_end - 1 ; GB_BINARY_SEARCH (i, Bi, pB, bpright, bfound) ; } if (!bfound) continue ; ASSERT (GBI (Bi, pB, vlen) == i) ; //---------------------------------------------------------- // C(i,j) = A(i,j) .* B(i,j) //---------------------------------------------------------- // C (i,j) = A (i,j) .* B (i,j) #if defined ( GB_PHASE_1_OF_2 ) cjnz++ ; #else Ci [pC] = i ; #ifndef GB_ISO_EMULT GB_GETA (aij, Ax, pA, A_iso) ; GB_GETB (bij, Bx, pB, B_iso) ; GB_BINOP (GB_CX (pC), aij, bij, i, j) ; #endif pC++ ; #endif } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif } else { //-------------------------------------------------------------- // M is bitmap or full, for either C<M>=A.*B or C<!M>=A.*B //-------------------------------------------------------------- // ------------------------------------------ // C <M>= A .* B // ------------------------------------------ // sparse bitmap sparse sparse (method: 8) // sparse full sparse sparse (method: 8) // ------------------------------------------ // C <!M>= A .* B // ------------------------------------------ // sparse bitmap sparse sparse (method: 8) // sparse full sparse sparse (method: 8) // GB_GET_MIJ: get M(i,j) where M is bitmap or full #undef GB_GET_MIJ #define GB_GET_MIJ(i) \ int64_t pM = pM_start + i ; \ bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ; \ if (Mask_comp) mij = !mij ; // both A and B are sparse/hyper ASSERT (A_is_sparse || A_is_hyper) ; ASSERT (B_is_sparse || B_is_hyper) ; int64_t pM_start = j * vlen ; if (ajnz > 32 * bjnz) { //---------------------------------------------------------- // Method8(f): A(:,j) much denser than B(:,j), M bitmap/full //---------------------------------------------------------- for ( ; pB < pB_end ; pB++) { int64_t i = Bi [pB] ; GB_GET_MIJ (i) ; if (mij) { // find i in 
A(:,j) int64_t pright = pA_end - 1 ; bool found ; GB_BINARY_SEARCH (i, Ai, pA, pright, found) ; if (found) { // C (i,j) = A (i,j) .* B (i,j) #if defined ( GB_PHASE_1_OF_2 ) cjnz++ ; #else ASSERT (pC < pC_end) ; Ci [pC] = i ; #ifndef GB_ISO_EMULT GB_GETA (aij, Ax, pA, A_iso) ; GB_GETB (bij, Bx, pB, B_iso) ; GB_BINOP (GB_CX (pC), aij, bij, i, j) ; #endif pC++ ; #endif } } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif } else if (bjnz > 32 * ajnz) { //---------------------------------------------------------- // Method8(g): B(:,j) much denser than A(:,j), M bitmap/full //---------------------------------------------------------- for ( ; pA < pA_end ; pA++) { int64_t i = Ai [pA] ; GB_GET_MIJ (i) ; if (mij) { // find i in B(:,j) int64_t pright = pB_end - 1 ; bool found ; GB_BINARY_SEARCH (i, Bi, pB, pright, found) ; if (found) { // C (i,j) = A (i,j) .* B (i,j) #if defined ( GB_PHASE_1_OF_2 ) cjnz++ ; #else ASSERT (pC < pC_end) ; Ci [pC] = i ; #ifndef GB_ISO_EMULT GB_GETA (aij, Ax, pA, A_iso) ; GB_GETB (bij, Bx, pB, B_iso) ; GB_BINOP (GB_CX (pC), aij, bij, i, j) ; #endif pC++ ; #endif } } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif } else { //---------------------------------------------------------- // Method8(h): A(:,j) and B(:,j) about same, M bitmap/full //---------------------------------------------------------- // linear-time scan of A(:,j) and B(:,j) while (pA < pA_end && pB < pB_end) { int64_t iA = Ai [pA] ; int64_t iB = Bi [pB] ; if (iA < iB) { // A(i,j) exists but not B(i,j) pA++ ; } else if (iB < iA) { // B(i,j) exists but not A(i,j) pB++ ; } else { // both A(i,j) and B(i,j) exist int64_t i = iA ; GB_GET_MIJ (i) ; if (mij) { // C (i,j) = A (i,j) .* B (i,j) #if defined ( GB_PHASE_1_OF_2 ) cjnz++ ; #else ASSERT (pC < pC_end) ; Ci [pC] = i ; #ifndef GB_ISO_EMULT GB_GETA (aij, Ax, pA, A_iso) ; GB_GETB (bij, Bx, pB, B_iso) ; GB_BINOP (GB_CX (pC), aij, bij, iB, j) ; #endif pC++ ; #endif } pA++ ; pB++ ; } } #if defined ( 
GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif } } //------------------------------------------------------------------ // final count of nnz (C (:,j)) //------------------------------------------------------------------ #if defined ( GB_PHASE_1_OF_2 ) if (fine_task) { TaskList [taskid].pC = cjnz ; } else { Cp [k] = cjnz ; } #endif } } }
SceneGraphConverterOCC.h
/* -*-c++-*- IfcQuery www.ifcquery.com * MIT License Copyright (c) 2017 Fabian Gerold Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #pragma once #include <unordered_set> #include <osg/Material> #include <osg/Geode> #include <osg/CullFace> #include <osg/Point> #include <osg/Switch> #include <osgText/Text> #include <BRepAdaptor_Curve.hxx> #include <BRep_Tool.hxx> #include <BRepMesh_IncrementalMesh.hxx> #include <GCPnts_AbscissaPoint.hxx> #include <GCPnts_UniformAbscissa.hxx> #include <Geom_Line.hxx> #include <Poly.hxx> #include <TopExp.hxx> #include <TopExp_Explorer.hxx> #include <TopoDS.hxx> #include <TopoDS_Edge.hxx> #include <TopoDS_Shape.hxx> #include <TopoDS_Vertex.hxx> #include <ifcpp/geometry/GeometrySettings.h> #include <ifcpp/geometry/SceneGraphUtils.h> #include <ifcpp/model/BuildingModel.h> #include <ifcpp/model/BasicTypes.h> #include <ifcpp/model/StatusCallback.h> #include <ifcpp/IFC4/include/IfcCurtainWall.h> #include <ifcpp/IFC4/include/IfcFeatureElementSubtraction.h> #include <ifcpp/IFC4/include/IfcProject.h> #include <ifcpp/IFC4/include/IfcPropertySetDefinitionSet.h> #include <ifcpp/IFC4/include/IfcRelAggregates.h> #include <ifcpp/IFC4/include/IfcRelContainedInSpatialStructure.h> #include <ifcpp/IFC4/include/IfcRelDefinesByProperties.h> #include <ifcpp/IFC4/include/IfcSpace.h> #include <ifcpp/IFC4/include/IfcWindow.h> #include "GeometryInputDataOCC.h" class ScenegraphConverterOCC : public StatusCallback { protected: std::map<int, osg::ref_ptr<osg::Switch> > m_map_entity_id_to_switch; // Map: IfcProduct ID -> scenegraph switch std::map<int, osg::ref_ptr<osg::Switch> > m_map_representation_to_switch; // Map: Representation identifier -> scenegraph switch shared_ptr<GeometrySettings> m_geom_settings; double m_recent_progress = 0; osg::ref_ptr<osg::CullFace> m_cull_back_off; osg::ref_ptr<osg::StateSet> m_glass_stateset; //\brief StateSet caching and re-use std::vector<osg::ref_ptr<osg::StateSet> > m_vec_existing_statesets; bool m_enable_stateset_caching = false; #ifdef ENABLE_OPENMP Mutex m_writelock_appearance_cache; #endif public: ScenegraphConverterOCC( 
shared_ptr<GeometrySettings>& geom_settings ) : m_geom_settings( geom_settings ) { m_cull_back_off = new osg::CullFace( osg::CullFace::BACK ); m_glass_stateset = new osg::StateSet(); m_glass_stateset->setMode( GL_BLEND, osg::StateAttribute::ON ); m_glass_stateset->setRenderingHint( osg::StateSet::TRANSPARENT_BIN ); } virtual ~ScenegraphConverterOCC() {} // after calling convertToOSG, the OSG Switches are in the map returned by this method const std::map<int, osg::ref_ptr<osg::Switch> >& getMapIdSwitch() { return m_map_entity_id_to_switch; } struct RenderOptions { RenderOptions(){} RenderOptions( osg::Vec4f color, double distance_between_points_in_mm = 0.5, bool create_points_along_straight_line = false ) { m_color = color; m_color_set = true; m_distance_between_points_in_mm = distance_between_points_in_mm; m_create_points_along_straight_line = create_points_along_straight_line; } osg::Vec4f m_color; bool m_color_set = false; double m_distance_between_points_in_mm = 0.5; bool m_create_points_along_straight_line = false; }; void clearInputCache() { m_map_entity_id_to_switch.clear(); m_map_representation_to_switch.clear(); m_vec_existing_statesets.clear(); } static void getEdgePoints( const TopoDS_Edge& edge, osg::Vec3Array* vertices, const RenderOptions& render_options ) { Standard_Real first = 0; Standard_Real last = 1; Handle( Geom_Curve ) c = BRep_Tool::Curve( edge, first, last ); bool discretize_points_on_straight_line = render_options.m_create_points_along_straight_line; if( c->DynamicType() == STANDARD_TYPE( Geom_Line ) && !discretize_points_on_straight_line ) { // just straight line const TopoDS_Vertex& v1 = TopExp::FirstVertex( edge ); const TopoDS_Vertex& v2 = TopExp::LastVertex( edge ); gp_Pnt point1 = BRep_Tool::Pnt( v1 ); gp_Pnt point2 = BRep_Tool::Pnt( v2 ); vertices->push_back( osg::Vec3d( point1.X(), point1.Y(), point1.Z() ) ); vertices->push_back( osg::Vec3d( point2.X(), point2.Y(), point2.Z() ) ); } else { double param_range = last - first; 
BRepAdaptor_Curve curve_adaptor(edge); //curve_adaptor.Initialize( edge ); #ifdef _DEBUG const TopoDS_Vertex& v1 = TopExp::FirstVertex( edge ); const TopoDS_Vertex& v2 = TopExp::LastVertex( edge ); gp_Pnt point1 = BRep_Tool::Pnt( v1 ); gp_Pnt point2 = BRep_Tool::Pnt( v2 ); #endif Standard_Real length_of_edge = GCPnts_AbscissaPoint::Length( curve_adaptor ); double distance = render_options.m_distance_between_points_in_mm; double num_points = 40*param_range/(2.0*M_PI); distance = length_of_edge/num_points; GCPnts_UniformAbscissa uniform_abscissa; uniform_abscissa.Initialize( curve_adaptor, distance ); if( uniform_abscissa.IsDone() ) { int nb_points = uniform_abscissa.NbPoints(); for( int i = 0; i < nb_points; ++i ) { Standard_Real parameter = uniform_abscissa.Parameter( i + 1 ); gp_Pnt pnt = curve_adaptor.Value( parameter ); vertices->push_back( osg::Vec3d( pnt.X(), pnt.Y(), pnt.Z() ) ); if( i > 0 && i < nb_points - 1 ) { vertices->push_back( osg::Vec3d( pnt.X(), pnt.Y(), pnt.Z() ) ); } } if( vertices->size()> 0 ) { if( vertices->size()%2 != 0 ) { vertices->push_back( vertices->back() ); } } } } } static void drawShape( const TopoDS_Shape& shape, osg::Geode* parent_geode, const RenderOptions& render_options ) { if( shape.IsNull() ) { return; } osg::ref_ptr<osg::Vec3Array> vertices_lines = new osg::Vec3Array(); osg::ref_ptr<osg::Vec3Array> vertices_tri_storage = new osg::Vec3Array(); osg::ref_ptr<osg::Vec3Array> vertices_tri = new osg::Vec3Array(); osg::ref_ptr<osg::Vec3Array> normals_tri = new osg::Vec3Array(); osg::ref_ptr<osg::Vec3Array> normals_tri_storage = new osg::Vec3Array(); osg::ref_ptr<osg::Vec3Array> vertices_quad; osg::ref_ptr<osg::Vec3Array> normals_quad; #ifdef _DEBUG osg::ref_ptr<osg::Vec3Array> vertices_triangle_edges = new osg::Vec3Array(); #endif TopAbs_ShapeEnum shape_type = shape.ShapeType(); if( shape_type == TopAbs_WIRE || shape_type == TopAbs_EDGE || shape_type == TopAbs_VERTEX ) { TopExp_Explorer Ex; for( Ex.Init( shape, TopAbs_EDGE ); 
Ex.More(); Ex.Next() ) { TopoDS_Edge edge = TopoDS::Edge( Ex.Current() ); getEdgePoints( edge, vertices_lines, render_options ); } } else { Standard_Real linear_tolerance = 0.06*0.001; // for [m] Standard_Real angular_tolerance = 0.5; bool is_relative = false; BRepMesh_IncrementalMesh incremental_mesh( shape, linear_tolerance, is_relative, angular_tolerance ); TopExp_Explorer shape_explorer( shape, TopAbs_FACE ); for( ; shape_explorer.More(); shape_explorer.Next() ) { const TopoDS_Face& face = TopoDS::Face( shape_explorer.Current() ); TopLoc_Location L = TopLoc_Location(); const Handle( Poly_Triangulation )& poly_triangulation = BRep_Tool::Triangulation( face, L ); if( poly_triangulation.IsNull() ) { continue; } const gp_Trsf & face_trsf = L.Transformation(); Poly::ComputeNormals( poly_triangulation ); const TColgp_Array1OfPnt& triang_vertices = poly_triangulation->Nodes(); const TShort_Array1OfShortReal& triang_normals = poly_triangulation->Normals(); const Poly_Array1OfTriangle& triangles = poly_triangulation->Triangles(); // Number of nodes in the triangulation int num_vertices = poly_triangulation->Nodes().Length(); if( num_vertices*3 != triang_normals.Length() ) { std::cout << "Different number of normals and vertices\n"; return; } if( !vertices_tri_storage ) { vertices_tri_storage = new osg::Vec3Array(); } size_t offset_vertex_storage = vertices_tri_storage->size(); if( !normals_tri_storage ) { normals_tri_storage = new osg::Vec3Array(); } //size_t offset_normals_storage = normals_tri_storage->size(); // Get each vertex index, checking common vertices between shapes for( int i = 0; i < num_vertices; i++ ) { gp_Pnt triang_point = triang_vertices.Value( i+1 ); gp_Vec normal( triang_normals.Value( i*3 + 1 ), triang_normals.Value( i*3 + 2 ), triang_normals.Value( i*3 + 3 ) ); if( face_trsf.Form() != gp_Identity ) { triang_point.Transform( face_trsf ); normal.Transform( face_trsf ); } double x = std::round( triang_point.X()*10.0 )*0.1; double y = std::round( 
triang_point.Y()*10.0 )*0.1; double z = std::round( triang_point.Z()*10.0 )*0.1; vertices_tri_storage->push_back( osg::Vec3d( x, y, z ) ); normals_tri_storage->push_back( osg::Vec3d( normal.X(), normal.Y(), normal.Z() ) ); } if( !vertices_tri ) { vertices_tri = new osg::Vec3Array(); } if( !normals_tri ) { normals_tri = new osg::Vec3Array(); } int num_stored_vertices = vertices_tri_storage->size(); for( auto it = triangles.begin(); it != triangles.end(); ++it ) { const Poly_Triangle& triang = *it; int idx_tri1, idx_tri2, idx_tri3; triang.Get( idx_tri1, idx_tri2, idx_tri3 ); int idx1 = offset_vertex_storage + idx_tri1 - 1; int idx2 = offset_vertex_storage + idx_tri2 - 1; int idx3 = offset_vertex_storage + idx_tri3 - 1; if( idx1 >= num_stored_vertices || idx2 >= num_stored_vertices || idx3 >= num_stored_vertices ) { std::cout << "idx > num_stored_vertices" << std::endl; continue; } osg::Vec3 v1 = vertices_tri_storage->at( idx1 ); osg::Vec3 v2 = vertices_tri_storage->at( idx2 ); osg::Vec3 v3 = vertices_tri_storage->at( idx3 ); vertices_tri->push_back( v1 ); vertices_tri->push_back( v2 ); vertices_tri->push_back( v3 ); osg::Vec3 n1 = normals_tri_storage->at( idx1 ); osg::Vec3 n2 = normals_tri_storage->at( idx2 ); osg::Vec3 n3 = normals_tri_storage->at( idx3 ); normals_tri->push_back( n1 ); normals_tri->push_back( n2 ); normals_tri->push_back( n3 ); } } } if( vertices_tri->size() > 0 ) { if( vertices_tri->size() == normals_tri->size() ) { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices_tri ); geometry->setNormalArray( normals_tri ); normals_tri->setBinding( osg::Array::BIND_PER_VERTEX ); if( render_options.m_color_set ) { osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); colors->push_back( render_options.m_color ); colors->setBinding( osg::Array::BIND_OVERALL ); geometry->setColorArray( colors ); } geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::TRIANGLES, 0, vertices_tri->size() ) ); 
parent_geode->addDrawable( geometry ); #ifdef DEBUG_DRAW_NORMALS osg::ref_ptr<osg::Vec3Array> vertices_normals = new osg::Vec3Array(); for( size_t i = 0; i < vertices_tri->size(); ++i ) { osg::Vec3f& vertex_vec = vertices_tri->at( i );// [i]; osg::Vec3f& normal_vec = normals_tri->at( i ); vertices_normals->push_back( osg::Vec3f( vertex_vec.x(), vertex_vec.y(), vertex_vec.z() ) ); vertices_normals->push_back( osg::Vec3f( vertex_vec.x(), vertex_vec.y(), vertex_vec.z() ) + normal_vec ); } osg::ref_ptr<osg::Vec4Array> colors_normals = new osg::Vec4Array(); colors_normals->resize( vertices_normals->size(), osg::Vec4f( 0.4f, 0.7f, 0.4f, 1.f ) ); osg::ref_ptr<osg::Geometry> geometry_normals = new osg::Geometry(); geometry_normals->setVertexArray( vertices_normals ); geometry_normals->setColorArray( colors_normals ); colors_normals->setBinding( osg::Array::BIND_PER_VERTEX ); geometry_normals->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geometry_normals->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_normals->size() ) ); parent_geode->addDrawable( geometry_normals ); #endif } } if( vertices_quad ) { if( vertices_quad->size() > 0 ) { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices_quad ); if( normals_quad ) { normals_quad->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setNormalArray( normals_quad ); } if( render_options.m_color_set ) { osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); colors->push_back( render_options.m_color ); colors->setBinding( osg::Array::BIND_OVERALL ); geometry->setColorArray( colors ); } geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::QUADS, 0, vertices_quad->size() ) ); parent_geode->addDrawable( geometry ); } } if( vertices_lines->size() > 0 ) { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices_lines ); if( render_options.m_color_set ) { osg::ref_ptr<osg::Vec4Array> 
colors = new osg::Vec4Array(); colors->push_back( render_options.m_color ); colors->setBinding( osg::Array::BIND_OVERALL ); geometry->setColorArray( colors ); } geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_lines->size() ) ); geometry->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); parent_geode->addDrawable( geometry ); } #ifdef _DEBUG if( vertices_triangle_edges->size() > 0 && false ) { { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices_triangle_edges ); osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); colors->resize( vertices_triangle_edges->size(), osg::Vec4f( 0.6f, 0.7f, 0.6f, 0.1f ) ); colors->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setColorArray( colors ); geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_triangle_edges->size() ) ); parent_geode->addDrawable( geometry ); geometry->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); } } #endif } void applyAppearancesToGroup( const std::vector<shared_ptr<AppearanceData> >& vec_product_appearances, osg::Group* grp ) { for( size_t ii = 0; ii < vec_product_appearances.size(); ++ii ) { const shared_ptr<AppearanceData>& appearance = vec_product_appearances[ii]; if( !appearance ) { continue; } AppearanceData::GeometryTypeEnum geom_type = appearance->m_apply_to_geometry_type; if( geom_type == AppearanceData::GEOM_TYPE_SURFACE || geom_type == AppearanceData::GEOM_TYPE_ANY ) { osg::StateSet* item_stateset = convertToOSGStateSet( appearance ); if( item_stateset != nullptr ) { osg::StateSet* existing_item_stateset = grp->getStateSet(); if( existing_item_stateset ) { if( existing_item_stateset != item_stateset ) { existing_item_stateset->merge( *item_stateset ); } } else { grp->setStateSet( item_stateset ); } } } else if( geom_type == AppearanceData::GEOM_TYPE_CURVE ) { //osg::Vec4f color_lines( appearance->m_color_ambient.m_r, 
appearance->m_color_ambient.m_g, appearance->m_color_ambient.m_b, appearance->m_color_ambient.m_a ); //GeomUtils::setColorToLines( grp, color_lines ); } } } //\brief method convertProductShapeToOSG: creates geometry objects from an IfcProduct object // caution: when using OpenMP, this method runs in parallel threads, so every write access to member variables needs a write lock void convertProductShapeToOSG( shared_ptr<ProductShapeDataOCC>& product_shape, std::map<int, osg::ref_ptr<osg::Switch> >& map_representation_switches ) { if( product_shape->m_ifc_object_definition.expired() ) { return; } RenderOptions render_options; shared_ptr<IfcObjectDefinition> ifc_object_def( product_shape->m_ifc_object_definition ); shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def); if( !ifc_product ) { return; } const int product_id = ifc_product->m_entity_id; std::stringstream strs_product_switch_name; strs_product_switch_name << "#" << product_id << "=" << ifc_product->className() << " group"; // create OSG objects std::vector<shared_ptr<RepresentationDataOCC> >& vec_product_representations = product_shape->m_vec_representations; for( size_t ii_representation = 0; ii_representation < vec_product_representations.size(); ++ii_representation ) { const shared_ptr<RepresentationDataOCC>& product_representation_data = vec_product_representations[ii_representation]; if( product_representation_data->m_ifc_representation.expired() ) { continue; } shared_ptr<IfcRepresentation> ifc_representation( product_representation_data->m_ifc_representation ); const int representation_id = ifc_representation->m_entity_id; osg::ref_ptr<osg::Switch> representation_switch = new osg::Switch(); #ifdef _DEBUG std::stringstream strs_representation_name; strs_representation_name << strs_product_switch_name.str().c_str() << ", representation " << ii_representation; representation_switch->setName( strs_representation_name.str().c_str() ); #endif const 
std::vector<shared_ptr<ItemShapeDataOCC> >& product_items = product_representation_data->m_vec_item_data; for( size_t i_item = 0; i_item < product_items.size(); ++i_item ) { const shared_ptr<ItemShapeDataOCC>& item_input_data = product_items[i_item]; osg::ref_ptr<osg::Group> item_group = new osg::Group(); if( !item_group ) { throw OutOfMemoryException( __FUNC__ ); } #ifdef _DEBUG std::stringstream strs_item_name; strs_item_name << strs_representation_name.str().c_str() << ", item " << i_item; item_group->setName( strs_item_name.str().c_str() ); #endif // create shape for open shells for( size_t ii_shapes = 0; ii_shapes < item_input_data->getShapes().size(); ++ii_shapes ) { const TopoDS_Shape& item_shape = item_input_data->getShapes()[ii_shapes]; osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } drawShape( item_shape, geode, render_options ); // disable back face culling for open meshes geode->getOrCreateStateSet()->setAttributeAndModes( m_cull_back_off.get(), osg::StateAttribute::OFF ); if( geode->getNumDrawables() > 0 ) { item_group->addChild( geode ); #ifdef _DEBUG std::stringstream strs_item_shape_name; strs_item_shape_name << strs_item_name.str().c_str() << ", open shape " << ii_shapes; geode->setName( strs_item_shape_name.str().c_str() ); #endif } } // create shape for points const std::vector<TopoDS_Vertex>& vertex_points = item_input_data->getVertexPoints(); if( vertex_points.size() > 0 ) { osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array(); for( size_t ii_vertex_point = 0; ii_vertex_point < vertex_points.size(); ++ii_vertex_point ) { const TopoDS_Vertex& vertex_input = vertex_points[ii_vertex_point]; if( !vertex_input.IsNull() ) { gp_Pnt point1 = BRep_Tool::Pnt( vertex_input ); vertices->push_back( osg::Vec3d( point1.X(), point1.Y(), point1.Z() ) ); } } if( vertices->size() > 0 ) { 
osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices ); geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::POINTS, 0, vertices->size() ) ); geode->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geode->getOrCreateStateSet()->setAttribute( new osg::Point( 3.0f ), osg::StateAttribute::ON ); geode->addDrawable( geometry ); geode->setCullingActive( false ); item_group->addChild( geode ); #ifdef _DEBUG std::stringstream strs_item_shape_name; strs_item_shape_name << strs_item_name.str().c_str() << ", vertex_point "; geode->setName( strs_item_shape_name.str().c_str() ); #endif } else { std::cout << __FUNC__ << ": unexpected vertices->size() == 0" << std::endl; } } // create shape for polylines for( size_t ii_shapes = 0; ii_shapes < item_input_data->getPolylines().size(); ++ii_shapes ) { const TopoDS_Wire& polyline_data = item_input_data->getPolylines()[ii_shapes]; osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } geode->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); RenderOptions render_options_polyline; if( item_input_data->getAppearances().size() > 0 ) { for( size_t ii_appearances = 0; ii_appearances < item_input_data->getAppearances().size(); ++ii_appearances ) { const shared_ptr<AppearanceData>& appearance = item_input_data->getAppearances()[ii_appearances]; if( !appearance ) { continue; } if( appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_CURVE || appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_ANY ) { osg::Vec4f color_lines( appearance->m_color_ambient.m_r, appearance->m_color_ambient.m_g, appearance->m_color_ambient.m_b, appearance->m_color_ambient.m_a ); render_options_polyline.m_color = color_lines; render_options_polyline.m_color_set = true; break; } } } drawShape( polyline_data, geode, render_options_polyline ); if( geode->getNumDrawables() > 0 ) { 
item_group->addChild( geode ); #ifdef _DEBUG std::stringstream strs_item_shape_name; strs_item_shape_name << strs_item_name.str().c_str() << ", polylines " << ii_shapes; geode->setName( strs_item_shape_name.str().c_str() ); #endif } } if( m_geom_settings->isShowTextLiterals() ) { for( size_t ii = 0; ii < item_input_data->getTextItems().size(); ++ii ) { const shared_ptr<TextItemDataOCC>& text_data = item_input_data->getTextItems()[ii]; if( !text_data ) { continue; } gp_Trsf& text_pos = text_data->m_text_position; // TODO: handle rotation std::string text_str; text_str.assign( text_data->m_text.begin(), text_data->m_text.end() ); gp_XYZ pos_translation = text_pos.TranslationPart(); osg::Vec3 pos2( pos_translation.X(), pos_translation.Y(), pos_translation.Z() );// text_pos._41, text_pos._42, text_pos._43 ); osg::ref_ptr<osgText::Text> txt = new osgText::Text(); if( !txt ) { throw OutOfMemoryException( __FUNC__ ); } txt->setFont( "fonts/arial.ttf" ); txt->setColor( osg::Vec4f( 0, 0, 0, 1 ) ); txt->setCharacterSize( 0.1f ); txt->setAutoRotateToScreen( true ); txt->setPosition( pos2 ); txt->setText( text_str.c_str() ); txt->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } geode->addDrawable( txt ); item_group->addChild( geode ); } } // apply statesets if there are any if( item_input_data->getAppearances().size() > 0 ) { applyAppearancesToGroup( item_input_data->getAppearances(), item_group ); } // If anything has been created, add it to the representation group if( item_group->getNumChildren() > 0 ) { #ifdef _DEBUG if( item_group->getNumParents() > 0 ) { std::cout << __FUNC__ << ": item_group->getNumParents() > 0" << std::endl; } #endif representation_switch->addChild( item_group ); } } // apply statesets if there are any if( product_representation_data->m_vec_representation_appearances.size() > 0 ) { applyAppearancesToGroup( 
product_representation_data->m_vec_representation_appearances, representation_switch );
}

// If anything has been created, add it to the product group
if( representation_switch->getNumChildren() > 0 )
{
#ifdef _DEBUG
	if( representation_switch->getNumParents() > 0 )
	{
		std::cout << __FUNC__ << ": product_representation_switch->getNumParents() > 0" << std::endl;
	}
#endif

	// enable transparency for certain objects
	if( dynamic_pointer_cast<IfcSpace>(ifc_product) )
	{
		representation_switch->setStateSet( m_glass_stateset );
	}
	else if( dynamic_pointer_cast<IfcCurtainWall>(ifc_product) || dynamic_pointer_cast<IfcWindow>(ifc_product) )
	{
		representation_switch->setStateSet( m_glass_stateset );
		SceneGraphUtils::setMaterialAlpha( representation_switch, 0.6f, true );
	}
}

map_representation_switches.insert( std::make_pair( representation_id, representation_switch ) );
}

// TODO: if no color or material is given, set color 231/219/169 for walls, 140/140/140 for slabs
}

/*\brief method convertToOSG: Creates geometry for OpenSceneGraph from given ProductShapeData.
\param[in]  map_shape_data Map of entity id -> product shape data to convert.
\param[out] parent_group Group to append the geometry.
**/
void convertToOSG( std::map<int, shared_ptr<ProductShapeDataOCC> >& map_shape_data, osg::ref_ptr<osg::Switch> parent_group )
{
	progressTextCallback( L"Converting geometry to OpenGL format ..." );
	progressValueCallback( 0, "scenegraph" );
	m_map_entity_id_to_switch.clear();
	m_map_representation_to_switch.clear();

	shared_ptr<ProductShapeDataOCC> ifc_project_data;
	std::vector<shared_ptr<ProductShapeDataOCC> > vec_products;

	for( auto it = map_shape_data.begin(); it != map_shape_data.end(); ++it )
	{
		shared_ptr<ProductShapeDataOCC> shape_data = it->second;
		if( shape_data )
		{
			vec_products.push_back( shape_data );
		}
	}

	// create geometry for each IfcProduct independently, spatial structure will be resolved later
	std::map<int, osg::ref_ptr<osg::Switch> >* map_entity_id = &m_map_entity_id_to_switch;
	std::map<int, osg::ref_ptr<osg::Switch> >* map_representations = &m_map_representation_to_switch;
	const int num_products = (int)vec_products.size();

#ifdef ENABLE_OPENMP
	Mutex writelock_map;
	Mutex writelock_ifc_project;

#pragma omp parallel firstprivate(num_products) shared(map_entity_id, map_representations)
	{
		// time for one product may vary significantly, so schedule not so many
#pragma omp for schedule(dynamic,10)
#endif
		for( int i = 0; i < num_products; ++i )
		{
			shared_ptr<ProductShapeDataOCC>& shape_data = vec_products[i];

			weak_ptr<IfcObjectDefinition>& ifc_object_def_weak = shape_data->m_ifc_object_definition;
			if( ifc_object_def_weak.expired() )
			{
				continue;
			}
			shared_ptr<IfcObjectDefinition> ifc_object_def( ifc_object_def_weak );

			std::stringstream thread_err;
			if( dynamic_pointer_cast<IfcFeatureElementSubtraction>(ifc_object_def) )
			{
				// geometry will be created in method subtractOpenings
				continue;
			}
			else if( dynamic_pointer_cast<IfcProject>(ifc_object_def) )
			{
#ifdef ENABLE_OPENMP
				ScopedLock scoped_lock( writelock_ifc_project );
#endif
				ifc_project_data = shape_data;
			}

			shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def);
			if( !ifc_product )
			{
				continue;
			}

			if( !ifc_product->m_Representation )
			{
				continue;
			}

			const int product_id = ifc_product->m_entity_id;
			std::map<int, osg::ref_ptr<osg::Switch> > map_representation_switches;
			try
			{
				convertProductShapeToOSG( shape_data, map_representation_switches );
			}
			catch( OutOfMemoryException& e )
			{
				// NOTE(review): `throw e;` re-throws a copy; plain `throw;` would preserve the original exception object
				throw e;
			}
			catch( BuildingException& e )
			{
				thread_err << e.what();
			}
			catch( Standard_Failure& sf )
			{
				thread_err << sf.GetMessageString();
			}
			catch( std::exception& e )
			{
				thread_err << e.what();
			}
			catch( ... )
			{
				thread_err << "undefined error, product id " << product_id;
			}

			if( map_representation_switches.size() > 0 )
			{
				osg::ref_ptr<osg::Switch> product_switch = new osg::Switch();

				std::stringstream strs_product_switch_name;
				strs_product_switch_name << "#" << product_id << "=" << ifc_product->className() << " group";
				product_switch->setName( strs_product_switch_name.str().c_str() );

				for( auto it_map = map_representation_switches.begin(); it_map != map_representation_switches.end(); ++it_map )
				{
					osg::ref_ptr<osg::Switch>& repres_switch = it_map->second;
					product_switch->addChild( repres_switch );
				}

				// apply statesets if there are any
				const std::vector<shared_ptr<AppearanceData> >& vec_product_appearances = shape_data->getAppearances();
				if( vec_product_appearances.size() > 0 )
				{
					applyAppearancesToGroup( vec_product_appearances, product_switch );
				}

#ifdef ENABLE_OPENMP
				// protect the shared result maps while this thread inserts
				ScopedLock scoped_lock( writelock_map );
#endif
				map_entity_id->insert( std::make_pair( product_id, product_switch ) );
				map_representations->insert( map_representation_switches.begin(), map_representation_switches.end() );
			}

			if( thread_err.tellp() > 0 )
			{
				messageCallback( thread_err.str().c_str(), StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ );
			}

			// progress callback
			double progress = (double)i / (double)num_products;
			if( progress - m_recent_progress > 0.02 )
			{
#ifdef ENABLE_OPENMP
				if( omp_get_thread_num() == 0 )
#endif
				{
					// leave 10% of progress to openscenegraph internals
					progressValueCallback( progress*0.9, "scenegraph" );
					m_recent_progress = progress;
				}
			}
		}
#ifdef ENABLE_OPENMP
	} // implicit barrier
#endif

	try
	{
		// now resolve spatial structure
		if( ifc_project_data )
		{
			resolveProjectStructure( ifc_project_data, parent_group );
		}
	}
	catch( OutOfMemoryException& e )
	{
		// NOTE(review): `throw e;` re-throws a copy; plain `throw;` would preserve the original exception object
		throw e;
	}
	catch( BuildingException& e )
	{
		messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
	}
	catch( std::exception& e )
	{
		messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
	}
	catch( ... )
	{
		messageCallback( "undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ );
	}
}

/*\brief Appends the already-converted switches of the given products to target_group.
Used for entities that are not part of the spatial structure. */
void addNodes( const std::map<int, shared_ptr<BuildingObject> >& map_shape_data, osg::ref_ptr<osg::Switch>& target_group )
{
	// check if there are entities that are not in spatial structure
	if( !target_group )
	{
		target_group = new osg::Switch();
	}

	for( auto it_product_shapes = map_shape_data.begin(); it_product_shapes != map_shape_data.end(); ++it_product_shapes )
	{
		int product_id = it_product_shapes->first;
		auto it_find = m_map_entity_id_to_switch.find( product_id );

		if( it_find != m_map_entity_id_to_switch.end() )
		{
			osg::ref_ptr<osg::Switch>& sw = it_find->second;
			if( sw )
			{
				target_group->addChild( sw );
			}
		}
	}
}

/*\brief Returns true if a node named "#<entity_id>=..." appears anywhere in the
(transitive) parent chain of the given group. Used for cycle detection. */
bool inParentList( const int entity_id, osg::Group* group )
{
	if( !group )
	{
		return false;
	}

	const osg::Group::ParentList& vec_parents = group->getParents();
	for( size_t ii = 0; ii < vec_parents.size(); ++ii )
	{
		osg::Group* parent = vec_parents[ii];
		if( parent )
		{
			const std::string parent_name = parent->getName();
			if( parent_name.length() > 0 )
			{
				if( parent_name.at( 0 ) == '#' )
				{
					// extract entity id from node names of the form "#123=IfcSomething"
					std::string parent_name_id = parent_name.substr( 1 );
					size_t last_index = parent_name_id.find_first_not_of( "0123456789" );
					std::string id_str = parent_name_id.substr( 0, last_index );
					// NOTE(review): std::stoi throws if the name is "#" followed by a non-digit — confirm names are always well-formed
					const int id = std::stoi( id_str.c_str() );
					if( id == entity_id )
					{
						return true;
					}
					bool in_parent_list = inParentList( entity_id, parent );
					if( in_parent_list )
					{
						return true;
					}
				}
			}
		}
	}
	return false;
}

/*\brief Recursively builds the scene graph following the IFC spatial structure.
\param[in]  product_data Current node of the product tree.
\param[out] group Group to which this product's geometry and children are appended. */
void resolveProjectStructure( const shared_ptr<ProductShapeDataOCC>& product_data, osg::ref_ptr<osg::Switch> group )
{
	if( !product_data )
	{
		return;
	}
	if( product_data->m_ifc_object_definition.expired() )
	{
		return;
	}
	shared_ptr<IfcObjectDefinition> object_def( product_data->m_ifc_object_definition );
	const int entity_id = object_def->m_entity_id;
	if( SceneGraphUtils::inParentList( entity_id, group ) )
	{
		messageCallback( "Cycle in project structure detected", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__, object_def.get() );
		return;
	}

	const std::vector<shared_ptr<ProductShapeDataOCC> >& vec_children = product_data->getChildren();
	for( size_t ii = 0; ii < vec_children.size(); ++ii )
	{
		const shared_ptr<ProductShapeDataOCC>& child_product_data = vec_children[ii];
		if( !child_product_data )
		{
			continue;
		}

		osg::ref_ptr<osg::Switch> group_subparts = new osg::Switch();
		resolveProjectStructure( child_product_data, group_subparts );
		// only keep child groups that actually received geometry
		if( group_subparts->getNumChildren() > 0 )
		{
			if( !child_product_data->m_ifc_object_definition.expired() )
			{
				shared_ptr<IfcObjectDefinition> child_obj_def( child_product_data->m_ifc_object_definition );
				std::stringstream group_subparts_name;
				group_subparts_name << "#" << child_obj_def->m_entity_id << "=";
				group_subparts_name << child_obj_def->className();
				group_subparts->setName( group_subparts_name.str().c_str() );
			}
			group->addChild( group_subparts );
		}
	}

	auto it_product_map = m_map_entity_id_to_switch.find( entity_id );
	if( it_product_map != m_map_entity_id_to_switch.end() )
	{
		const osg::ref_ptr<osg::Switch>& product_switch = it_product_map->second;
		if( product_switch )
		{
			group->addChild( product_switch );
		}
	}
	else
	{
		// no geometry for this entity: insert an empty, named placeholder so the structure stays navigable
		if( group->getNumChildren() == 0 )
		{
			osg::ref_ptr<osg::Switch> product_switch = new osg::Switch();
			group->addChild( product_switch );

			std::stringstream switch_name;
			switch_name << "#" << entity_id << "=" << object_def->className();
			product_switch->setName( switch_name.str().c_str() );
		}
	}
}

/*\brief Clears the cache of re-usable statesets. */
void clearAppearanceCache()
{
#ifdef ENABLE_OPENMP
	ScopedLock lock( m_writelock_appearance_cache );
#endif
	m_vec_existing_statesets.clear();
}

/*\brief Converts an AppearanceData into an osg::StateSet, re-using a cached
stateset with (approximately) the same material when caching is enabled.
\return The stateset, or nullptr if appearence is null. */
osg::StateSet* convertToOSGStateSet( const shared_ptr<AppearanceData>& appearence )
{
	if( !appearence )
	{
		return nullptr;
	}
	const float shininess = appearence->m_shininess;
	const float transparency = appearence->m_transparency;
	const bool set_transparent = appearence->m_set_transparent;

	const float color_ambient_r = appearence->m_color_ambient.r();
	const float color_ambient_g = appearence->m_color_ambient.g();
	const float color_ambient_b = appearence->m_color_ambient.b();
	const float color_ambient_a = appearence->m_color_ambient.a();

	const float color_diffuse_r = appearence->m_color_diffuse.r();
	const float color_diffuse_g = appearence->m_color_diffuse.g();
	const float color_diffuse_b = appearence->m_color_diffuse.b();
	const float color_diffuse_a = appearence->m_color_diffuse.a();

	const float color_specular_r = appearence->m_color_specular.r();
	const float color_specular_g = appearence->m_color_specular.g();
	const float color_specular_b = appearence->m_color_specular.b();
	const float color_specular_a = appearence->m_color_specular.a();

	if( m_enable_stateset_caching )
	{
#ifdef ENABLE_OPENMP
		ScopedLock lock( m_writelock_appearance_cache );
#endif

		// look for an existing stateset whose material matches within a tolerance of 0.03 per channel
		for( size_t i = 0; i<m_vec_existing_statesets.size(); ++i )
		{
			const osg::ref_ptr<osg::StateSet> stateset_existing = m_vec_existing_statesets[i];

			if( !stateset_existing.valid() )
			{
				continue;
			}

			osg::ref_ptr<osg::Material> mat_existing = (osg::Material*)stateset_existing->getAttribute( osg::StateAttribute::MATERIAL );
			if( !mat_existing )
			{
				continue;
			}

			// compare
			// NOTE(review): unqualified abs() on float may pick the int overload depending on headers — consider std::fabs
			osg::Vec4f color_ambient_existing = mat_existing->getAmbient( osg::Material::FRONT_AND_BACK );
			if( abs( color_ambient_existing.r() - color_ambient_r ) > 0.03 ) break;
			if( abs( color_ambient_existing.g() - color_ambient_g ) > 0.03 ) break;
			if( abs( color_ambient_existing.b() - color_ambient_b ) > 0.03 ) break;
			if( abs( color_ambient_existing.a() - color_ambient_a ) > 0.03 ) break;

			osg::Vec4f color_diffuse_existing = mat_existing->getDiffuse( osg::Material::FRONT_AND_BACK );
			if( abs( color_diffuse_existing.r() - color_diffuse_r ) > 0.03 ) break;
			if( abs( color_diffuse_existing.g() - color_diffuse_g ) > 0.03 ) break;
			if( abs( color_diffuse_existing.b() - color_diffuse_b ) > 0.03 ) break;
			if( abs( color_diffuse_existing.a() - color_diffuse_a ) > 0.03 ) break;

			osg::Vec4f color_specular_existing = mat_existing->getSpecular( osg::Material::FRONT_AND_BACK );
			if( abs( color_specular_existing.r() - color_specular_r ) > 0.03 ) break;
			if( abs( color_specular_existing.g() - color_specular_g ) > 0.03 ) break;
			if( abs( color_specular_existing.b() - color_specular_b ) > 0.03 ) break;
			if( abs( color_specular_existing.a() - color_specular_a ) > 0.03 ) break;

			float shininess_existing = mat_existing->getShininess( osg::Material::FRONT_AND_BACK );
			if( abs( shininess_existing - shininess ) > 0.03 ) break;

			bool blend_on_existing = stateset_existing->getMode( GL_BLEND ) == osg::StateAttribute::ON;
			if( blend_on_existing != set_transparent ) break;

			bool transparent_bin = stateset_existing->getRenderingHint() == osg::StateSet::TRANSPARENT_BIN;
			if( transparent_bin != set_transparent ) break;

			// if we get here, appearance is same as existing state set
			// TODO: block this re-used stateset for merging, or prevent merged statesets from being re-used
			return stateset_existing;
		}
	}

	// no cached match: build a new material (alpha channel carries the transparency value)
	osg::Vec4f ambientColor( color_ambient_r, color_ambient_g, color_ambient_b, transparency );
	osg::Vec4f diffuseColor( color_diffuse_r, color_diffuse_g, color_diffuse_b, transparency );
	osg::Vec4f specularColor( color_specular_r, color_specular_g, color_specular_b, transparency );

	// TODO: material caching and re-use
	// NOTE(review): plain `new` throws std::bad_alloc on failure, so these null checks are dead code
	osg::ref_ptr<osg::Material> mat = new osg::Material();
	if( !mat ) { throw OutOfMemoryException(); }
	mat->setAmbient( osg::Material::FRONT_AND_BACK, ambientColor );
	mat->setDiffuse( osg::Material::FRONT_AND_BACK, diffuseColor );
	mat->setSpecular( osg::Material::FRONT_AND_BACK, specularColor );
	mat->setShininess( osg::Material::FRONT_AND_BACK, shininess );
	mat->setColorMode( osg::Material::SPECULAR );

	osg::StateSet* stateset = new osg::StateSet();
	if( !stateset ) { throw OutOfMemoryException(); }
	stateset->setAttribute( mat, osg::StateAttribute::ON );

	if( appearence->m_set_transparent )
	{
		mat->setTransparency( osg::Material::FRONT_AND_BACK, transparency );
		stateset->setMode( GL_BLEND, osg::StateAttribute::ON );
		stateset->setRenderingHint( osg::StateSet::TRANSPARENT_BIN );
	}

	if( appearence->m_specular_exponent != 0.f )
	{
		//osg::ref_ptr<osgFX::SpecularHighlights> spec_highlights = new osgFX::SpecularHighlights();
		//spec_highlights->setSpecularExponent( spec->m_value );
		// todo: add to scenegraph
	}

	if( m_enable_stateset_caching )
	{
		m_vec_existing_statesets.push_back( stateset );
	}
	return stateset;
}
};
GB_unaryop__minv_bool_int64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_bool_int64 // op(A') function: GB_tran__minv_bool_int64 // C type: bool // A type: int64_t // cast: ; // unaryop: cij = true #define GB_ATYPE \ int64_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = true ; // casting #define GB_CASTING(z, aij) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_bool_int64 ( bool *Cx, // Cx and Ax may be aliased int64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_bool_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
compatibility.h
// -*- C++ -*- // Copyright (C) 2007, 2008, 2009, 2010, 2012 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/compatibility.h * @brief Compatibility layer, mostly concerned with atomic operations. * This file is a GNU parallel extension to the Standard C++ Library. */ // Written by Felix Putze. #ifndef _GLIBCXX_PARALLEL_COMPATIBILITY_H #define _GLIBCXX_PARALLEL_COMPATIBILITY_H 1 #include <parallel/types.h> #include <parallel/base.h> #if defined(__SUNPRO_CC) && defined(__sparc) #include <sys/atomic.h> #endif #if !defined(_WIN32) || defined (__CYGWIN__) #include <sched.h> #endif #if defined(_MSC_VER) #include <Windows.h> #include <intrin.h> #undef max #undef min #endif #ifdef __MINGW32__ // Including <windows.h> will drag in all the windows32 names. Since // that can cause user code portability problems, we just declare the // one needed function here. 
extern "C" __attribute((dllimport)) void __attribute__((stdcall)) Sleep (unsigned long); #endif namespace __gnu_parallel { #if defined(__ICC) template<typename _MustBeInt = int> int32_t __faa32(int32_t* __x, int32_t __inc) { asm volatile("lock xadd %0,%1" : "=__r" (__inc), "=__m" (*__x) : "0" (__inc) : "memory"); return __inc; } #if defined(__x86_64) template<typename _MustBeInt = int> int64_t __faa64(int64_t* __x, int64_t __inc) { asm volatile("lock xadd %0,%1" : "=__r" (__inc), "=__m" (*__x) : "0" (__inc) : "memory"); return __inc; } #endif #endif // atomic functions only work on integers /** @brief Add a value to a variable, atomically. * * Implementation is heavily platform-dependent. * @param __ptr Pointer to a 32-bit signed integer. * @param __addend Value to add. */ inline int32_t __fetch_and_add_32(volatile int32_t* __ptr, int32_t __addend) { #if defined(__ICC) //x86 version return _InterlockedExchangeAdd((void*)__ptr, __addend); #elif defined(__ECC) //IA-64 version return _InterlockedExchangeAdd((void*)__ptr, __addend); #elif defined(__ICL) || defined(_MSC_VER) return _InterlockedExchangeAdd(reinterpret_cast<volatile long*>(__ptr), __addend); #elif defined(__GNUC__) return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL); #elif defined(__SUNPRO_CC) && defined(__sparc) volatile int32_t __before, __after; do { __before = *__ptr; __after = __before + __addend; } while (atomic_cas_32((volatile unsigned int*)__ptr, __before, __after) != __before); return __before; #else //fallback, slow #pragma message("slow __fetch_and_add_32") int32_t __res; #pragma omp critical { __res = *__ptr; *(__ptr) += __addend; } return __res; #endif } /** @brief Add a value to a variable, atomically. * * Implementation is heavily platform-dependent. * @param __ptr Pointer to a 64-bit signed integer. * @param __addend Value to add. 
   *  @return The value of *__ptr before the addition. */
  inline int64_t
  __fetch_and_add_64(volatile int64_t* __ptr, int64_t __addend)
  {
#if defined(__ICC) && defined(__x86_64) //x86 version
    return __faa64<int>((int64_t*)__ptr, __addend);
#elif defined(__ECC)    //IA-64 version
    return _InterlockedExchangeAdd64((void*)__ptr, __addend);
#elif defined(__ICL) || defined(_MSC_VER)
#ifndef _WIN64
    _GLIBCXX_PARALLEL_ASSERT(false);    //not available in this case
    return 0;
#else
    return _InterlockedExchangeAdd64(__ptr, __addend);
#endif
#elif defined(__GNUC__) && defined(__x86_64)
    return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
#elif defined(__GNUC__) && defined(__i386) &&                   \
  (defined(__i686) || defined(__pentium4) || defined(__athlon)  \
   || defined(__k8) || defined(__core2))
    // 64-bit atomics only exist on i386 when a CPU with cmpxchg8b is targeted
    return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
#elif defined(__SUNPRO_CC) && defined(__sparc)
    volatile int64_t __before, __after;
    do
      {
        __before = *__ptr;
        __after = __before + __addend;
      } while (atomic_cas_64((volatile unsigned long long*)__ptr, __before,
                             __after) != __before);
    return __before;
#else   //fallback, slow
#if defined(__GNUC__) && defined(__i386)
    // XXX doesn't work with -march=native
    //#warning "please compile with -march=i686 or better"
#endif
#pragma message("slow __fetch_and_add_64")
    int64_t __res;
#pragma omp critical
    {
      __res = *__ptr;
      *(__ptr) += __addend;
    }
    return __res;
#endif
  }

  /** @brief Add a value to a variable, atomically.
   *
   *  Implementation is heavily platform-dependent.
   *  @param __ptr Pointer to a signed integer.
   *  @param __addend Value to add.
*/ template<typename _Tp> inline _Tp __fetch_and_add(volatile _Tp* __ptr, _Tp __addend) { if (sizeof(_Tp) == sizeof(int32_t)) return (_Tp)__fetch_and_add_32((volatile int32_t*) __ptr, (int32_t)__addend); else if (sizeof(_Tp) == sizeof(int64_t)) return (_Tp)__fetch_and_add_64((volatile int64_t*) __ptr, (int64_t)__addend); else _GLIBCXX_PARALLEL_ASSERT(false); } #if defined(__ICC) template<typename _MustBeInt = int> inline int32_t __cas32(volatile int32_t* __ptr, int32_t __old, int32_t __nw) { int32_t __before; __asm__ __volatile__("lock; cmpxchgl %1,%2" : "=a"(__before) : "q"(__nw), "__m"(*(volatile long long*)(__ptr)), "0"(__old) : "memory"); return __before; } #if defined(__x86_64) template<typename _MustBeInt = int> inline int64_t __cas64(volatile int64_t *__ptr, int64_t __old, int64_t __nw) { int64_t __before; __asm__ __volatile__("lock; cmpxchgq %1,%2" : "=a"(__before) : "q"(__nw), "__m"(*(volatile long long*)(__ptr)), "0"(__old) : "memory"); return __before; } #endif #endif /** @brief Compare @c *__ptr and @c __comparand. If equal, let @c * *__ptr=__replacement and return @c true, return @c false otherwise. * * Implementation is heavily platform-dependent. * @param __ptr Pointer to 32-bit signed integer. * @param __comparand Compare value. * @param __replacement Replacement value. 
   *  @return True if the swap was performed. */
  inline bool
  __compare_and_swap_32(volatile int32_t* __ptr, int32_t __comparand,
                        int32_t __replacement)
  {
#if defined(__ICC)      //x86 version
    return _InterlockedCompareExchange((void*)__ptr, __replacement,
                                       __comparand) == __comparand;
#elif defined(__ECC)    //IA-64 version
    return _InterlockedCompareExchange((void*)__ptr, __replacement,
                                       __comparand) == __comparand;
#elif defined(__ICL) || defined(_MSC_VER)
    return _InterlockedCompareExchange(
                                reinterpret_cast<volatile long*>(__ptr),
                                __replacement, __comparand) == __comparand;
#elif defined(__GNUC__)
    return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
                                       false, __ATOMIC_ACQ_REL,
                                       __ATOMIC_RELAXED);
#elif defined(__SUNPRO_CC) && defined(__sparc)
    return atomic_cas_32((volatile unsigned int*)__ptr, __comparand,
                         __replacement) == __comparand;
#else
#pragma message("slow __compare_and_swap_32")
    bool __res = false;
#pragma omp critical
    {
      if (*__ptr == __comparand)
        {
          *__ptr = __replacement;
          __res = true;
        }
    }
    return __res;
#endif
  }

  /** @brief Compare @c *__ptr and @c __comparand. If equal, let @c
   *  *__ptr=__replacement and return @c true, return @c false otherwise.
   *
   *  Implementation is heavily platform-dependent.
   *  @param __ptr Pointer to 64-bit signed integer.
   *  @param __comparand Compare value.
   *  @param __replacement Replacement value.
   *  @return True if the swap was performed. */
  inline bool
  __compare_and_swap_64(volatile int64_t* __ptr, int64_t __comparand,
                        int64_t __replacement)
  {
#if defined(__ICC) && defined(__x86_64) //x86 version
    return __cas64<int>(__ptr, __comparand, __replacement) == __comparand;
#elif defined(__ECC)    //IA-64 version
    return _InterlockedCompareExchange64((void*)__ptr, __replacement,
                                         __comparand) == __comparand;
#elif defined(__ICL) || defined(_MSC_VER)
#ifndef _WIN64
    _GLIBCXX_PARALLEL_ASSERT(false);    //not available in this case
    return 0;   // i.e. false: the swap cannot be performed on 32-bit Windows
#else
    return _InterlockedCompareExchange64(__ptr, __replacement,
                                         __comparand) == __comparand;
#endif
#elif defined(__GNUC__) && defined(__x86_64)
    return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
                                       false, __ATOMIC_ACQ_REL,
                                       __ATOMIC_RELAXED);
#elif defined(__GNUC__) && defined(__i386) &&                   \
  (defined(__i686) || defined(__pentium4) || defined(__athlon)  \
   || defined(__k8) || defined(__core2))
    // 64-bit CAS only exists on i386 when a CPU with cmpxchg8b is targeted
    return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
                                       false, __ATOMIC_ACQ_REL,
                                       __ATOMIC_RELAXED);
#elif defined(__SUNPRO_CC) && defined(__sparc)
    return atomic_cas_64((volatile unsigned long long*)__ptr,
                         __comparand, __replacement) == __comparand;
#else
#if defined(__GNUC__) && defined(__i386)
    // XXX -march=native
    //#warning "please compile with -march=i686 or better"
#endif
#pragma message("slow __compare_and_swap_64")
    bool __res = false;
#pragma omp critical
    {
      if (*__ptr == __comparand)
        {
          *__ptr = __replacement;
          __res = true;
        }
    }
    return __res;
#endif
  }

  /** @brief Compare @c *__ptr and @c __comparand. If equal, let @c
   *  *__ptr=__replacement and return @c true, return @c false otherwise.
   *
   *  Implementation is heavily platform-dependent.
   *  @param __ptr Pointer to signed integer.
   *  @param __comparand Compare value.
   *  @param __replacement Replacement value.
*/ template<typename _Tp> inline bool __compare_and_swap(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement) { if (sizeof(_Tp) == sizeof(int32_t)) return __compare_and_swap_32((volatile int32_t*) __ptr, (int32_t)__comparand, (int32_t)__replacement); else if (sizeof(_Tp) == sizeof(int64_t)) return __compare_and_swap_64((volatile int64_t*) __ptr, (int64_t)__comparand, (int64_t)__replacement); else _GLIBCXX_PARALLEL_ASSERT(false); } /** @brief Yield the control to another thread, without waiting for the end to the time slice. */ inline void __yield() { #if defined (_WIN32) && !defined (__CYGWIN__) Sleep(0); #else sched_yield(); #endif } } // end namespace #endif /* _GLIBCXX_PARALLEL_COMPATIBILITY_H */
GB_unaryop__ainv_uint16_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint16_uint16
// op(A') function:  GB_tran__ainv_uint16_uint16

// C type:   uint16_t
// A type:   uint16_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
// (additive inverse; on uint16_t the negation is modular, i.e. 2^16 - x)
#define GB_OP(z, x)   \
    z = -x ;

// casting
#define GB_CASTING(z, x)   \
    uint16_t z = (uint16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;        \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = -aij entry-wise, in parallel over nthreads.
GrB_Info GB_unop__ainv_uint16_uint16
(
    uint16_t *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose kernel is generated by including GB_unaryop_transpose.c
// with the GB_* macros defined above (phase 2 of 2 of the transpose).
GrB_Info GB_tran__ainv_uint16_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
residualbased_elimination_builder_and_solver.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // // #if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER ) #define KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER /* System includes */ #include <set> #include <unordered_set> /* External includes */ #ifdef KRATOS_SMP_OPENMP #include <omp.h> #endif /* Project includes */ #include "utilities/timer.h" #include "includes/define.h" #include "includes/key_hash.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" #include "includes/model_part.h" #include "utilities/builtin_timer.h" #include "utilities/atomic_utilities.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedEliminationBuilderAndSolver * @ingroup KratosCore * @brief Current class provides an implementation for standard elimination builder and solving operations. * @details The RHS is constituted by the unbalanced loads (residual) * Degrees of freedom are reordered putting the restrained degrees of freedom at * the end of the system ordered in reverse order with respect to the DofSet. * Imposition of the dirichlet conditions is naturally dealt with as the residual already contains this information. 
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual * @author Riccardo Rossi */ template<class TSparseSpace, class TDenseSpace, //= DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class ResidualBasedEliminationBuilderAndSolver : public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver > { public: ///@name Type Definitions ///@{ /// Pointer definition of ResidualBasedEliminationBuilderAndSolverWithConstraints KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedEliminationBuilderAndSolver); /// Definition of the base class typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; /// The definition of the current class typedef ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> ClassType; /// Definition of the classes from the base class typedef typename BaseType::SizeType SizeType; typedef typename BaseType::IndexType IndexType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; /// Definition of the equation id vector typedef Element::EquationIdVectorType EquationIdVectorType; typedef Element::DofsVectorType DofsVectorType; /// Node definition typedef Node<3> NodeType; /// Containers definition typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename BaseType::ConditionsArrayType ConditionsArrayType; typedef typename 
BaseType::ElementsContainerType ElementsContainerType;

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Default constructor
     */
    explicit ResidualBasedEliminationBuilderAndSolver() : BaseType()
    {
    }

    /**
     * @brief Default constructor. (with parameters)
     * @param pNewLinearSystemSolver The linear solver used for the system of equations
     * @param ThisParameters The configuration parameters (validated against GetDefaultParameters())
     */
    explicit ResidualBasedEliminationBuilderAndSolver(
        typename TLinearSolver::Pointer pNewLinearSystemSolver,
        Parameters ThisParameters
        ) : BaseType(pNewLinearSystemSolver)
    {
        // Validate and assign defaults
        ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
        this->AssignSettings(ThisParameters);
    }

    /**
     * @brief Constructor.
     * @param pNewLinearSystemSolver The linear solver used for the system of equations
     */
    explicit ResidualBasedEliminationBuilderAndSolver(
        typename TLinearSolver::Pointer pNewLinearSystemSolver)
        : BaseType(pNewLinearSystemSolver)
    {
    }

    /** Destructor.
     */
    ~ResidualBasedEliminationBuilderAndSolver() override
    {
    }

    /**
     * @brief Create method
     * @param pNewLinearSystemSolver The linear solver for the system of equations
     * @param ThisParameters The configuration parameters
     */
    typename BaseType::Pointer Create(
        typename TLinearSolver::Pointer pNewLinearSystemSolver,
        Parameters ThisParameters
        ) const override
    {
        return Kratos::make_shared<ClassType>(pNewLinearSystemSolver,ThisParameters);
    }

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief Function to perform the build of the RHS. The vector could be sized as the total number
     * of dofs or as the number of unrestrained ones
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param rA The LHS matrix
     * @param rb The RHS vector
     */
    void Build(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY

        KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;

        // Getting the elements from the model
        ElementsArrayType& r_elements_array = rModelPart.Elements();

        // Getting the array of the conditions
        ConditionsArrayType& r_conditions_array = rModelPart.Conditions();

        // Number of entities to assemble
        const int nelements = static_cast<int>(r_elements_array.size());
        const int nconditions = static_cast<int>(r_conditions_array.size());

        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
        const auto it_elem_begin = r_elements_array.begin();
        const auto it_cond_begin = r_conditions_array.begin();

        // Contributions to the system (resized by the scheme for each entity)
        LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

        // Vector containing the localization in the system of the different terms
        EquationIdVectorType equation_id;

        // Assemble all elements
        const auto timer = BuiltinTimer();

        // Each thread carries private copies of the local contributions and equation ids
        #pragma omp parallel firstprivate(LHS_Contribution, RHS_Contribution, equation_id )
        {
            #pragma omp for schedule(guided, 512) nowait
            for (int k = 0; k < nelements; ++k) {
                auto it_elem = it_elem_begin + k;

                // Detect if the element is active or not. If the user did not make any choice the element is active by default
                bool element_is_active = true;
                if (it_elem->IsDefined(ACTIVE))
                    element_is_active = it_elem->Is(ACTIVE);

                if (element_is_active) {
                    // Calculate elemental contribution
                    pScheme->CalculateSystemContributions(*it_elem, LHS_Contribution, RHS_Contribution, equation_id, r_current_process_info);

                    // Assemble the elemental contribution (lock-based or atomic, depending on build flags)
#ifdef USE_LOCKS_IN_ASSEMBLY
                    Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id, mLockArray);
#else
                    Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id);
#endif
                }
            }

            #pragma omp for schedule(guided, 512)
            for (int k = 0; k < nconditions; ++k) {
                auto it_cond = it_cond_begin + k;

                // Detect if the condition is active or not. If the user did not make any choice the condition is active by default
                bool condition_is_active = true;
                if (it_cond->IsDefined(ACTIVE))
                    condition_is_active = it_cond->Is(ACTIVE);

                if (condition_is_active) {
                    // Calculate the condition contribution
                    pScheme->CalculateSystemContributions(*it_cond, LHS_Contribution, RHS_Contribution, equation_id, r_current_process_info);

#ifdef USE_LOCKS_IN_ASSEMBLY
                    Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id, mLockArray);
#else
                    Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id);
#endif
                }
            }
        }

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() >=1) << "System build time: " << timer.ElapsedSeconds() << std::endl;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2) << "Finished building" << std::endl;

        KRATOS_CATCH("")
    }

    /**
     * @brief Function to perform the building of the LHS
     * @details Depending on the implementation chosen the size of the matrix could be equal to the total number of Dofs or to the number of unrestrained dofs
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param rA The LHS matrix
     */
    void BuildLHS(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& rA
        ) override
    {
        KRATOS_TRY

        KRATOS_ERROR_IF(!pScheme) << "No scheme provided!"
            << std::endl;

        // Getting the elements from the model
        ElementsArrayType& r_elements_array = rModelPart.Elements();

        // Getting the array of the conditions
        ConditionsArrayType& r_conditions_array = rModelPart.Conditions();

        // Number of entities to assemble
        const int nelements = static_cast<int>(r_elements_array.size());
        const int nconditions = static_cast<int>(r_conditions_array.size());

        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
        const auto it_elem_begin = r_elements_array.begin();
        const auto it_cond_begin = r_conditions_array.begin();

        // Resetting to zero the vector of reactions
        TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));

        // Contributions to the system
        LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);

        // Vector containing the localization in the system of the different terms
        EquationIdVectorType equation_id;

        // Each thread carries private copies of the local contribution and equation ids
        #pragma omp parallel firstprivate(LHS_Contribution, equation_id )
        {
            #pragma omp for schedule(guided, 512) nowait
            for (int k = 0; k < nelements; ++k) {
                auto it_elem = it_elem_begin + k;

                // Detect if the element is active or not. If the user did not make any choice the element is active by default
                bool element_is_active = true;
                if (it_elem->IsDefined(ACTIVE))
                    element_is_active = it_elem->Is(ACTIVE);

                if (element_is_active) {
                    // Calculate elemental contribution
                    pScheme->CalculateLHSContribution(*it_elem, LHS_Contribution, equation_id, r_current_process_info);

                    // Assemble the elemental contribution
                    AssembleLHS(rA, LHS_Contribution, equation_id);
                }
            }

            #pragma omp for schedule(guided, 512)
            for (int k = 0; k < nconditions; ++k) {
                auto it_cond = it_cond_begin + k;

                // Detect if the condition is active or not. If the user did not make any choice the condition is active by default
                bool condition_is_active = true;
                if (it_cond->IsDefined(ACTIVE))
                    condition_is_active = it_cond->Is(ACTIVE);

                if (condition_is_active) {
                    // Calculate the condition contribution
                    pScheme->CalculateLHSContribution(*it_cond, LHS_Contribution, equation_id, r_current_process_info);

                    // Assemble the condition contribution
                    AssembleLHS(rA, LHS_Contribution, equation_id);
                }
            }
        }

        KRATOS_CATCH("")
    }

    /**
     * @brief Build a rectangular matrix of size n*N where "n" is the number of unrestrained degrees of freedom
     * and "N" is the total number of degrees of freedom involved.
     * @details This matrix is obtained by building the total matrix without the lines corresponding to the fixed
     * degrees of freedom (but keeping the columns!!)
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param rA The LHS matrix
     */
    void BuildLHS_CompleteOnFreeRows(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& rA
        ) override
    {
        KRATOS_TRY

        KRATOS_ERROR_IF(!pScheme) << "No scheme provided!"
            << std::endl;

        // Getting the elements from the model
        ElementsArrayType& r_elements_array = rModelPart.Elements();

        // Getting the array of the conditions
        ConditionsArrayType& r_conditions_array = rModelPart.Conditions();

        // Number of entities to assemble
        const int nelements = static_cast<int>(r_elements_array.size());
        const int nconditions = static_cast<int>(r_conditions_array.size());

        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
        const auto it_elem_begin = r_elements_array.begin();
        const auto it_cond_begin = r_conditions_array.begin();

        // Resetting to zero the vector of reactions
        TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));

        // Contributions to the system
        LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);

        // Vector containing the localization in the system of the different terms
        EquationIdVectorType equation_id;

        // Each thread carries private copies of the local contribution and equation ids
        #pragma omp parallel firstprivate(LHS_Contribution, equation_id )
        {
            #pragma omp for schedule(guided, 512) nowait
            for (int k = 0; k < nelements; ++k) {
                auto it_elem = it_elem_begin + k;

                // Detect if the element is active or not. If the user did not make any choice the element is active by default
                bool element_is_active = true;
                if (it_elem->IsDefined(ACTIVE))
                    element_is_active = it_elem->Is(ACTIVE);

                if (element_is_active) {
                    // Calculate elemental contribution
                    pScheme->CalculateLHSContribution(*it_elem, LHS_Contribution, equation_id, r_current_process_info);

                    // Assemble the elemental contribution (free rows only, all columns kept)
                    AssembleLHSCompleteOnFreeRows(rA, LHS_Contribution, equation_id);
                }
            }

            #pragma omp for schedule(guided, 512)
            for (int k = 0; k < nconditions; ++k) {
                auto it_cond = it_cond_begin + k;

                // Detect if the condition is active or not. If the user did not make any choice the condition is active by default
                bool condition_is_active = true;
                if (it_cond->IsDefined(ACTIVE))
                    condition_is_active = it_cond->Is(ACTIVE);

                if (condition_is_active) {
                    // Calculate the condition contribution
                    pScheme->CalculateLHSContribution(*it_cond, LHS_Contribution, equation_id, r_current_process_info);

                    // Assemble the condition contribution (free rows only, all columns kept)
                    AssembleLHSCompleteOnFreeRows(rA, LHS_Contribution, equation_id);
                }
            }
        }

        KRATOS_CATCH("")
    }

    /**
     * @brief This is a call to the linear system solver
     * @param rA The LHS matrix
     * @param rDx The Unknowns vector
     * @param rb The RHS vector
     */
    void SystemSolve(
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY

        double norm_b;
        if (TSparseSpace::Size(rb) != 0) {
            norm_b = TSparseSpace::TwoNorm(rb);
        } else {
            norm_b = 0.0;
        }

        if (norm_b != 0.0) {
            // Do solve
            BaseType::mpLinearSystemSolver->Solve(rA, rDx, rb);
        } else
            // Zero RHS implies a zero solution; skip the solver call entirely
            TSparseSpace::SetToZero(rDx);

        // Prints information about the current time
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;

        KRATOS_CATCH("")
    }

    /**
     * @brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
     * @param rA The LHS matrix
     * @param rDx The Unknowns vector
     * @param rb The RHS vector
     * @param rModelPart The model part of the problem to solve
     */
    void SystemSolveWithPhysics(
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb,
        ModelPart& rModelPart
        )
    {
        KRATOS_TRY

        double norm_b;
        if (TSparseSpace::Size(rb) != 0) {
            norm_b = TSparseSpace::TwoNorm(rb);
        } else {
            norm_b = 0.0;
        }

        if (norm_b != 0.0) {
            // Provide physical data as needed (e.g. dof set for solvers that require it)
            if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded() )
                BaseType::mpLinearSystemSolver->ProvideAdditionalData(rA, rDx, rb, BaseType::mDofSet, rModelPart);

            // Do solve
            BaseType::mpLinearSystemSolver->Solve(rA, rDx, rb);
        } else {
            TSparseSpace::SetToZero(rDx);
            KRATOS_WARNING_IF("ResidualBasedEliminationBuilderAndSolver", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl;
        }

        // Prints information about the current time
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;

        KRATOS_CATCH("")
    }

    /**
     * @brief Function to perform the building and solving phase at the same time.
     * @details It is ideally the fastest and safer function to use when it is possible to solve
     * just after building
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param rA The LHS matrix
     * @param rDx The Unknowns vector
     * @param rb The RHS vector
     */
    void BuildAndSolve(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY

        Timer::Start("Build");

        Build(pScheme, rModelPart, rA, rb);

        Timer::Stop("Build");

        // Does nothing...dirichlet conditions are naturally dealt with in defining the residual
        ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb);

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;

        const auto timer = BuiltinTimer();
        Timer::Start("Solve");

        SystemSolveWithPhysics(rA, rDx, rb, rModelPart);

        Timer::Stop("Solve");
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() >=1) << "System solve time: " << timer.ElapsedSeconds() << std::endl;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;

        KRATOS_CATCH("")
    }
    /**
     * @brief Corresponds to the previous, but the System's matrix is considered already built and only the RHS is built again
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param rA The LHS matrix
     * @param rDx The Unknowns vector
     * @param rb The RHS vector
     */
    void BuildRHSAndSolve(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY

        BuildRHS(pScheme, rModelPart, rb);
        SystemSolve(rA, rDx, rb);

        KRATOS_CATCH("")
    }

    /**
     * @brief Function to perform the build of the RHS.
     * @details The vector could be sized as the total number of dofs or as the number of unrestrained ones
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param rb The RHS vector
     */
    void BuildRHS(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY

        // Resetting to zero the vector of reactions
        if(BaseType::mCalculateReactionsFlag) {
            TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));
        }

        // Getting the Elements
        ElementsArrayType& r_elements_array = rModelPart.Elements();

        // Getting the array of the conditions
        ConditionsArrayType& r_conditions_array = rModelPart.Conditions();

        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // Contributions to the system
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

        // Vector containing the localization in the system of the different terms
        EquationIdVectorType equation_id;

        // Assemble all elements
        #pragma omp parallel firstprivate( RHS_Contribution, equation_id)
        {
            const auto it_elem_begin = r_elements_array.begin();
            const int nelements = static_cast<int>(r_elements_array.size());
            #pragma omp for schedule(guided, 512) nowait
            for (int i = 0; i < nelements; ++i) {
                auto it_elem = it_elem_begin + i;

                // Detect if the element is active or not. If the user did not make any choice the element is active by default
                bool element_is_active = true;
                if (it_elem->IsDefined(ACTIVE))
                    element_is_active = it_elem->Is(ACTIVE);

                if (element_is_active) {
                    // Calculate elemental Right Hand Side Contribution
                    pScheme->CalculateRHSContribution(*it_elem, RHS_Contribution, equation_id, r_current_process_info);

                    // Assemble the elemental contribution
                    AssembleRHS(rb, RHS_Contribution, equation_id);
                }
            }

            // Assemble all conditions
            const auto it_cond_begin = r_conditions_array.begin();
            const int nconditions = static_cast<int>(r_conditions_array.size());
            #pragma omp for schedule(guided, 512)
            for (int i = 0; i < nconditions; ++i) {
                auto it_cond = it_cond_begin + i;

                // Detect if the condition is active or not. If the user did not make any choice the condition is active by default
                bool condition_is_active = true;
                if (it_cond->IsDefined(ACTIVE))
                    condition_is_active = it_cond->Is(ACTIVE);

                if (condition_is_active) {
                    // Calculate the condition contribution
                    pScheme->CalculateRHSContribution(*it_cond, RHS_Contribution, equation_id, r_current_process_info);

                    // Assemble the condition contribution
                    AssembleRHS(rb, RHS_Contribution, equation_id);
                }
            }
        }

        KRATOS_CATCH("")
    }

    /**
     * @brief Builds the list of the DofSets involved in the problem by "asking" to each element
     * and condition its Dofs.
     * @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the
     * way the matrix and RHS are built
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     */
    void SetUpDofSet(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart
        ) override
    {
        KRATOS_TRY;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl;

        // Gets the array of elements from the modeler
        ElementsArrayType& r_elements_array = rModelPart.Elements();
        const int nelements = static_cast<int>(r_elements_array.size());

        DofsVectorType elemental_dof_list;

        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        SizeType nthreads = ParallelUtilities::GetNumThreads();

        typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;

        // One set of dof pointers per thread; merged afterwards via a tree reduction
        std::vector<set_type> dofs_aux_list(nthreads);

        for (int i = 0; i < static_cast<int>(nthreads); ++i) {
            dofs_aux_list[i].reserve(nelements);
        }

        IndexPartition<std::size_t>(nelements).for_each(elemental_dof_list, [&](std::size_t Index, DofsVectorType& tls_elemental_dof_list){
            auto it_elem = r_elements_array.begin() + Index;
            const IndexType this_thread_id = OpenMPUtils::ThisThread();

            // Gets list of Dof involved on every element
            pScheme->GetDofList(*it_elem, tls_elemental_dof_list, r_current_process_info);
            dofs_aux_list[this_thread_id].insert(tls_elemental_dof_list.begin(), tls_elemental_dof_list.end());
        });

        ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
        const int nconditions = static_cast<int>(r_conditions_array.size());
        IndexPartition<std::size_t>(nconditions).for_each(elemental_dof_list, [&](std::size_t Index, DofsVectorType& tls_elemental_dof_list){
            auto it_cond = r_conditions_array.begin() + Index;
            const IndexType this_thread_id = OpenMPUtils::ThisThread();

            // Gets list of Dof involved on every condition
            pScheme->GetDofList(*it_cond, tls_elemental_dof_list, r_current_process_info);
            dofs_aux_list[this_thread_id].insert(tls_elemental_dof_list.begin(), tls_elemental_dof_list.end());
        });

        // Here we do a reduction in a tree so to have everything on thread 0
        SizeType old_max = nthreads;
        SizeType new_max = ceil(0.5*static_cast<double>(old_max));
        while (new_max >= 1 && new_max != old_max) {
            IndexPartition<std::size_t>(new_max).for_each([&](std::size_t Index){
                if (Index + new_max < old_max) {
                    dofs_aux_list[Index].insert(dofs_aux_list[Index + new_max].begin(), dofs_aux_list[Index + new_max].end());
                    dofs_aux_list[Index + new_max].clear();
                }
            });

            old_max = new_max;
            new_max = ceil(0.5*static_cast<double>(old_max));
        }

        // Copy the merged set (now fully contained in slot 0) into the ordered dof array
        DofsArrayType dof_temp;
        BaseType::mDofSet = DofsArrayType();

        dof_temp.reserve(dofs_aux_list[0].size());
        for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); ++it) {
            dof_temp.push_back(*it);
        }
        dof_temp.Sort();

        BaseType::mDofSet = dof_temp;

        // Throws an exception if there are no Degrees of freedom involved in the analysis
        KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;

        BaseType::mDofSetIsInitialized = true;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl;

#ifdef USE_LOCKS_IN_ASSEMBLY
        // (Re)create one OpenMP lock per dof of the system
        if (mLockArray.size() != 0) {
            for (int i = 0; i < static_cast<int>(mLockArray.size()); i++)
                omp_destroy_lock(&mLockArray[i]);
        }

        mLockArray.resize(BaseType::mDofSet.size());

        for (int i = 0; i < static_cast<int>(mLockArray.size()); i++)
            omp_init_lock(&mLockArray[i]);
#endif

        // If reactions are to be calculated, we check if all the dofs have reactions defined
        // This is to be done only in debug mode
#ifdef KRATOS_DEBUG
        if(BaseType::GetCalculateReactionsFlag()) {
            for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) {
                KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl
                    << "Node : " << dof_iterator->Id() << std::endl
                    << "Dof : " << (*dof_iterator) << std::endl << "Not possible to calculate reactions." << std::endl;
            }
        }
#endif

        KRATOS_CATCH("");
    }

    /**
     * @brief Organises the dofset in order to speed up the building phase
     * @param rModelPart The model part of the problem to solve
     */
    void SetUpSystem(ModelPart& rModelPart) override
    {
        // Set equation id for degrees of freedom
        // the free degrees of freedom are positioned at the beginning of the system,
        // while the fixed one are at the end (in opposite order).
        //
        // that means that if the EquationId is greater than "mEquationSystemSize"
        // the pointed degree of freedom is restrained
        //
        int free_id = 0;
        int fix_id = BaseType::mDofSet.size();

        for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
            if (dof_iterator->IsFixed())
                dof_iterator->SetEquationId(--fix_id);
            else
                dof_iterator->SetEquationId(free_id++);

        // Only the free dofs [0, mEquationSystemSize) enter the assembled system
        BaseType::mEquationSystemSize = fix_id;
    }

    /**
     * @brief This method resizes and initializes the system of equations
     * @param pScheme The integration scheme considered
     * @param pA The pointer to the LHS matrix
     * @param pDx The pointer to the vector of Unknowns
     * @param pb The pointer to the RHS vector
     * @param rModelPart The model part to be computed
     */
    void ResizeAndInitializeVectors(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixPointerType& pA,
        TSystemVectorPointerType& pDx,
        TSystemVectorPointerType& pb,
        ModelPart& rModelPart
        ) override
    {
        KRATOS_TRY

        if (pA == nullptr) { // If the pointer is not initialized initialize it to an empty matrix
            TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
            pA.swap(pNewA);
        }
        if (pDx == nullptr) { // If the pointer is not initialized initialize it to an empty vector
            TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
            pDx.swap(pNewDx);
        }
        if (pb == nullptr) { // If the pointer is not initialized initialize it to an empty vector
            TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
            pb.swap(pNewb);
        }
        if (BaseType::mpReactionsVector == nullptr) { // If the pointer is not initialized initialize it to an empty vector
            TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0));
            BaseType::mpReactionsVector.swap(pNewReactionsVector);
        }

        TSystemMatrixType& rA = *pA;
        TSystemVectorType& rDx = *pDx;
        TSystemVectorType& rb = *pb;

        // Resizing the system vectors and matrix
        if (rA.size1() == 0 || BaseType::GetReshapeMatrixFlag()) { // If the matrix is not initialized
            rA.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
            ConstructMatrixStructure(pScheme, rA, rModelPart);
        } else {
            if (rA.size1() != BaseType::mEquationSystemSize || rA.size2() != BaseType::mEquationSystemSize) {
                KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
                rA.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
                ConstructMatrixStructure(pScheme, rA, rModelPart);
            }
        }
        if (rDx.size() != BaseType::mEquationSystemSize) {
            rDx.resize(BaseType::mEquationSystemSize, false);
        }
        TSparseSpace::SetToZero(rDx);
        if (rb.size() != BaseType::mEquationSystemSize) {
            rb.resize(BaseType::mEquationSystemSize, false);
        }
        TSparseSpace::SetToZero(rb);

        // If needed resize the vector for the calculation of reactions
        if (BaseType::mCalculateReactionsFlag == true) {
            const std::size_t reactions_vector_size = BaseType::mDofSet.size() - BaseType::mEquationSystemSize;
            if (BaseType::mpReactionsVector->size() != reactions_vector_size)
                BaseType::mpReactionsVector->resize(reactions_vector_size, false);
        }

        KRATOS_CATCH("")
    }

    /**
     * @brief This method computes the reactions
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part considered
     * @param rA The LHS of the system
     * @param rDx The vector of Unknowns
     * @param rb The RHS vector
     */
    void CalculateReactions(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        // Refresh RHS to have the correct reactions
        BuildRHS(pScheme, rModelPart, rb);

        // Updating variables
        std::size_t i;
        TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector;
        for (auto it2 = BaseType::mDofSet.ptr_begin(); it2 != BaseType::mDofSet.ptr_end(); ++it2) {
            i = (*it2)->EquationId();
            // Fixed dofs carry equation ids >= mEquationSystemSize; remap them into the reactions vector
            if (i >= BaseType::mEquationSystemSize) {
                i -= BaseType::mEquationSystemSize;
                (*it2)->GetSolutionStepReactionValue() = -r_reactions_vector[i];
            }
        }
    }

    /**
     * @brief Applies the dirichlet conditions. This operation may be very heavy or completely
     * inexpensive depending on the implementation chosen and on how the System Matrix is built.
     * @details For explanation of how it works for a particular implementation the user
     * should refer to the particular Builder And Solver chosen
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param rA The LHS matrix
     * @param rDx The Unknowns vector
     * @param rb The RHS vector
     */
    void ApplyDirichletConditions(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        // Intentionally empty: with the elimination approach the fixed dofs never
        // enter the assembled system, so no explicit treatment is required here
    }

    /**
     * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
     */
    void Clear() override
    {
        this->mDofSet = DofsArrayType();
        this->mpReactionsVector.reset();
//      this->mReactionsVector = TSystemVectorType();
        this->mpLinearSystemSolver->Clear();

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
    }

    /**
     * @brief This function is designed to be called once to perform all the checks needed
     * on the input provided. Checks can be "expensive" as the function is designed
     * to catch user's errors.
     * @param rModelPart The model part of the problem to solve
     * @return 0 all ok
     */
    int Check(ModelPart& rModelPart) override
    {
        KRATOS_TRY

        return 0;

        KRATOS_CATCH("");
    }

    /**
     * @brief This method provides the defaults parameters to avoid conflicts between the different constructors
     * @return The default parameters
     */
    Parameters GetDefaultParameters() const override
    {
        Parameters default_parameters = Parameters(R"( { "name" : "elimination_builder_and_solver" })");

        // Getting base class default parameters
        const Parameters base_default_parameters = BaseType::GetDefaultParameters();
        default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
        return default_parameters;
    }

    /**
     * @brief Returns the name of the class as used in the settings (snake_case format)
     * @return The name of the class
     */
    static std::string Name()
    {
        return "elimination_builder_and_solver";
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "ResidualBasedEliminationBuilderAndSolver";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}
protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

#ifdef USE_LOCKS_IN_ASSEMBLY
    std::vector<omp_lock_t> mLockArray; // One lock per dof, used to serialize row assembly
#endif

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief This method assembles the system
     * @param rA The LHS of the system
     * @param rb The RHS of the system
     * @param rLHSContribution The LHS local contribution
     * @param rRHSContribution The RHS local contribution
     * @param rEquationId The equation id
     * @param rLockArray The lock of the dof
     * @note The main difference respect the block builder and solver is the fact that the fixed DoFs are not considered on the assembling
     */
    void Assemble(
        TSystemMatrixType& rA,
        TSystemVectorType& rb,
        const LocalSystemMatrixType& rLHSContribution,
        const LocalSystemVectorType& rRHSContribution,
        const Element::EquationIdVectorType& rEquationId
#ifdef USE_LOCKS_IN_ASSEMBLY
        ,std::vector< omp_lock_t >& rLockArray
#endif
        )
    {
        const SizeType local_size = rLHSContribution.size1();

        for (IndexType i_local = 0; i_local < local_size; ++i_local) {
            const IndexType i_global = rEquationId[i_local];

            // Rows belonging to fixed dofs (equation id >= mEquationSystemSize) are skipped
            if (i_global < BaseType::mEquationSystemSize) {
#ifdef USE_LOCKS_IN_ASSEMBLY
                omp_set_lock(&rLockArray[i_global]);
                rb[i_global] += rRHSContribution(i_local);
#else
                double& r_a = rb[i_global];
                const double& v_a = rRHSContribution(i_local);
                AtomicAdd(r_a, v_a);
#endif
                AssembleRowContributionFreeDofs(rA, rLHSContribution, i_global, i_local, rEquationId);

#ifdef USE_LOCKS_IN_ASSEMBLY
                omp_unset_lock(&rLockArray[i_global]);
#endif
            }
            // Note that computation of reactions is not performed here!
        }
    }

    /**
     * @brief This method constructs the relationship between the DoF
     * @param pScheme The integration scheme
     * @param rA The LHS of the system
     * @param rModelPart The model part which defines the problem
     */
    virtual void ConstructMatrixStructure(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixType& rA,
        ModelPart& rModelPart
        )
    {
        // Filling with zero the matrix (creating the structure)
        Timer::Start("MatrixStructure");

        const SizeType equation_size = BaseType::mEquationSystemSize;

        // One set of column indices per free row of the system
        std::vector<std::unordered_set<IndexType> > indices(equation_size);

        block_for_each(indices, [](std::unordered_set<IndexType>& rIndices){
            rIndices.reserve(40);
        });

        Element::EquationIdVectorType ids(3, 0);

        #pragma omp parallel firstprivate(ids)
        {
            // The process info
            const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

            // We repeat the same declaration for each thread
            std::vector<std::unordered_set<IndexType> > temp_indexes(equation_size);

            #pragma omp for
            for (int index = 0; index < static_cast<int>(equation_size); ++index)
                temp_indexes[index].reserve(30);

            // Getting the size of the array of elements from the model
            const int number_of_elements = static_cast<int>(rModelPart.Elements().size());

            // Element initial iterator
            const auto it_elem_begin = rModelPart.ElementsBegin();

            // We iterate over the elements
            #pragma omp for schedule(guided, 512) nowait
            for (int i_elem = 0; i_elem<number_of_elements; ++i_elem) {
                auto it_elem = it_elem_begin + i_elem;
                pScheme->EquationId( *it_elem, ids, r_current_process_info);

                // Only the free dofs (id < mEquationSystemSize) generate rows/columns
                for (auto& id_i : ids) {
                    if (id_i < BaseType::mEquationSystemSize) {
                        auto& row_indices = temp_indexes[id_i];
                        for (auto& id_j : ids)
                            if (id_j < BaseType::mEquationSystemSize)
                                row_indices.insert(id_j);
                    }
                }
            }

            // Getting the size of the array of the conditions
            const int number_of_conditions = static_cast<int>(rModelPart.Conditions().size());

            // Condition initial iterator
            const auto it_cond_begin = rModelPart.ConditionsBegin();

            // We iterate over the conditions
            #pragma omp for schedule(guided, 512) nowait
            for (int i_cond = 0; i_cond<number_of_conditions; ++i_cond) {
                auto it_cond = it_cond_begin + i_cond;
                pScheme->EquationId( *it_cond, ids, r_current_process_info);
                for (auto& id_i : ids) {
                    if (id_i < BaseType::mEquationSystemSize) {
                        auto& row_indices = temp_indexes[id_i];
                        for (auto& id_j : ids)
                            if (id_j < BaseType::mEquationSystemSize)
                                row_indices.insert(id_j);
                    }
                }
            }

            // Merging all the temporal indexes
            #pragma omp critical
            {
                for (int i = 0; i < static_cast<int>(temp_indexes.size()); ++i) {
                    indices[i].insert(temp_indexes[i].begin(), temp_indexes[i].end());
                }
            }
        }

        // Count the row sizes
        SizeType nnz = 0;
        for (IndexType i = 0; i < indices.size(); ++i)
            nnz += indices[i].size();

        rA = TSystemMatrixType(indices.size(), indices.size(), nnz);

        double* Avalues = rA.value_data().begin();
        std::size_t* Arow_indices = rA.index1_data().begin();
        std::size_t* Acol_indices = rA.index2_data().begin();

        // Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
        Arow_indices[0] = 0;
        for (IndexType i = 0; i < rA.size1(); ++i)
            Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();

        IndexPartition<std::size_t>(rA.size1()).for_each([&](std::size_t Index){
            const IndexType row_begin = Arow_indices[Index];
            const IndexType row_end = Arow_indices[Index + 1];
            IndexType k = row_begin;
            for (auto it = indices[Index].begin(); it != indices[Index].end(); ++it) {
                Acol_indices[k] = *it;
                Avalues[k] = 0.0;
                ++k;
            }

            // Column indices of each row must be sorted: the assembly lookups scan them in order
            std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
        });

        rA.set_filled(indices.size() + 1, nnz);

        Timer::Stop("MatrixStructure");
    }

    /**
     * @brief This method assembles the LHS of the system
     * @param rA The LHS to assemble
     * @param rLHSContribution The local LHS contribution
     * @param rEquationId The equation id
     */
    void AssembleLHS(
        TSystemMatrixType& rA,
        LocalSystemMatrixType& rLHSContribution,
        EquationIdVectorType& rEquationId
        )
    {
        const SizeType local_size = rLHSContribution.size1();

        for (IndexType i_local = 0; i_local < local_size; ++i_local) {
const IndexType i_global = rEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { for (IndexType j_local = 0; j_local < local_size; ++j_local) { const IndexType j_global = rEquationId[j_local]; if (j_global < BaseType::mEquationSystemSize) { rA(i_global, j_global) += rLHSContribution(i_local, j_local); } } } } } /** * @brief This function is equivalent to the AssembleRowContribution of the block builder and solver * @note The main difference respect the block builder and solver is the fact that the fixed DoFs are skipped */ inline void AssembleRowContributionFreeDofs( TSystemMatrixType& rA, const Matrix& rALocal, const IndexType i, const IndexType i_local, const Element::EquationIdVectorType& EquationId ) { double* values_vector = rA.value_data().begin(); IndexType* index1_vector = rA.index1_data().begin(); IndexType* index2_vector = rA.index2_data().begin(); const IndexType left_limit = index1_vector[i]; // Find the first entry // We iterate over the equation ids until we find the first equation id to be considered // We count in which component we find an ID IndexType last_pos = 0; IndexType last_found = 0; IndexType counter = 0; for(IndexType j=0; j < EquationId.size(); ++j) { ++counter; const IndexType j_global = EquationId[j]; if (j_global < BaseType::mEquationSystemSize) { last_pos = ForwardFind(j_global,left_limit,index2_vector); last_found = j_global; break; } } // If the counter is equal to the size of the EquationID vector that means that only one dof will be considered, if the number is greater means that all the dofs are fixed. 
If the number is below means that at we have several dofs free to be considered if (counter <= EquationId.size()) { #ifndef USE_LOCKS_IN_ASSEMBLY double& r_a = values_vector[last_pos]; const double& v_a = rALocal(i_local,counter - 1); AtomicAdd(r_a, v_a); #else values_vector[last_pos] += rALocal(i_local,counter - 1); #endif // Now find all of the other entries IndexType pos = 0; for(IndexType j = counter; j < EquationId.size(); ++j) { IndexType id_to_find = EquationId[j]; if (id_to_find < BaseType::mEquationSystemSize) { if(id_to_find > last_found) pos = ForwardFind(id_to_find,last_pos+1,index2_vector); else if(id_to_find < last_found) pos = BackwardFind(id_to_find,last_pos-1,index2_vector); else pos = last_pos; #ifndef USE_LOCKS_IN_ASSEMBLY double& r = values_vector[pos]; const double& v = rALocal(i_local,j); AtomicAdd(r, v); #else values_vector[pos] += rALocal(i_local,j); #endif last_found = id_to_find; last_pos = pos; } } } } inline IndexType ForwardFind(const IndexType id_to_find, const IndexType start, const IndexType* index_vector) { IndexType pos = start; while(id_to_find != index_vector[pos]) pos++; return pos; } inline IndexType BackwardFind(const IndexType id_to_find, const IndexType start, const IndexType* index_vector) { IndexType pos = start; while(id_to_find != index_vector[pos]) pos--; return pos; } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ /** * @brief This method ensures that the contribution is unique */ inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate) { std::vector<std::size_t>::iterator i = v.begin(); std::vector<std::size_t>::iterator endit = v.end(); while (i != endit && (*i) != candidate) { i++; } if (i == endit) { v.push_back(candidate); } } /** * @brief This method 
assembles the RHS of the system * @param rb The RHS to assemble * @param rRHSContribution The local RHS contribution * @param rEquationId The equation id */ void AssembleRHS( TSystemVectorType& rb, const LocalSystemVectorType& rRHSContribution, const EquationIdVectorType& rEquationId ) { SizeType local_size = rRHSContribution.size(); if (BaseType::mCalculateReactionsFlag == false) { for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { // Free dof // ASSEMBLING THE SYSTEM VECTOR double& b_value = rb[i_global]; const double& rhs_value = rRHSContribution[i_local]; AtomicAdd(b_value, rhs_value); } } } else { TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector; for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { //free dof // ASSEMBLING THE SYSTEM VECTOR double& b_value = rb[i_global]; const double& rhs_value = rRHSContribution[i_local]; AtomicAdd(b_value, rhs_value); } else { // Fixed dof double& b_value = r_reactions_vector[i_global - BaseType::mEquationSystemSize]; const double& rhs_value = rRHSContribution[i_local]; AtomicAdd(b_value, rhs_value); } } } } /** * @brief This method assembles the LHS of the system (on free rows) * @param rA The LHS to assemble * @param rLHSContribution The local LHS contribution * @param rEquationId The equation id */ void AssembleLHSCompleteOnFreeRows( TSystemMatrixType& rA, LocalSystemMatrixType& rLHSContribution, EquationIdVectorType& rEquationId ) { const SizeType local_size = rLHSContribution.size1(); for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { for (IndexType j_local = 0; j_local < local_size; ++j_local) { const IndexType j_global = rEquationId[j_local]; rA(i_global, j_global) += 
rLHSContribution(i_local, j_local); } } } } ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class ResidualBasedEliminationBuilderAndSolver */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER defined */
pdpotrf.c
/**
 *
 * @file pdpotrf.c
 *
 *  PLASMA auxiliary routines
 *  PLASMA is a software package provided by Univ. of Tennessee,
 *  Univ. of California Berkeley and Univ. of Colorado Denver
 *
 * @version 2.6.0
 * @author Jakub Kurzak
 * @author Hatem Ltaief
 * @author Mathieu Faverge
 * @date 2010-11-15
 * @generated d Tue Jan 7 11:45:10 2014
 *
 **/
#include "common.h"
#ifdef USE_MKL
#include <mkl_lapacke.h>
#else
#include <lapacke.h>
#endif
#if defined(USE_OMPEXT)
#include <omp_ext.h>
#endif

/* Address of tile (m,n) of A, viewed as a tile of doubles. */
#define A(m,n) BLKADDR(A, double, m, n)

/***************************************************************************//**
 *  Parallel tile Cholesky factorization - dynamic scheduling via OpenMP tasks.
 *
 *  Right-looking tile algorithm for the PlasmaUpper case (A = U^T * U).
 *  Task dependences are expressed on the tile base addresses, so the runtime
 *  serializes the factor / solve / update tasks that touch the same tile.
 *  The PlasmaLower variant is intentionally not implemented here.
 **/
void plasma_pdpotrf_quark(PLASMA_enum uplo, PLASMA_desc A)
{
    const double one     = (double) 1.0;
    const double neg_one = (double)-1.0;

    /*
     * PlasmaLower: not supported by this kernel.
     */
    if (uplo == PlasmaLower) {
        abort();
    }
    /*
     * PlasmaUpper
     */
    else {
        for (int k = 0; k < A.nt; k++) {
            /* Trailing diagonal tile may be partial. */
            int diag_m = (k == A.nt-1) ? A.n - k*A.nb : A.nb;
            int ldak   = BLKLDD(A, k);
            double *dA = A(k, k);
#if defined(USE_OMPEXT)
            /* Prioritize the critical-path factorization task. */
            omp_set_task_priority(1);
#endif
            /* Factor the diagonal tile A(k,k). */
#pragma omp task depend(inout:dA[0:A.mb*A.mb])
            {
                LAPACKE_dpotrf_work(LAPACK_COL_MAJOR,
                                    lapack_const(PlasmaUpper),
                                    diag_m, dA, ldak);
            }

            /* Panel: solve each tile of row k against the factored tile. */
            for (int m = k+1; m < A.nt; m++) {
                int ncols  = (m == A.nt-1) ? A.n - m*A.nb : A.nb;
                double *dT = A(k, k);
                double *dP = A(k, m);
#pragma omp task depend(in:dT[0:A.mb*A.mb]) depend(inout:dP[0:A.mb*A.mb])
                cblas_dtrsm(CblasColMajor,
                            (CBLAS_SIDE)PlasmaLeft,
                            (CBLAS_UPLO)PlasmaUpper,
                            (CBLAS_TRANSPOSE)PlasmaTrans,
                            (CBLAS_DIAG)PlasmaNonUnit,
                            A.mb, ncols,
                            one, dT, ldak,
                                 dP, ldak);
            }

            /* Trailing submatrix update. */
            for (int m = k+1; m < A.nt; m++) {
                int ncols  = (m == A.nt-1) ? A.n - m*A.nb : A.nb;
                int ldam   = BLKLDD(A, m);
                double *dU = A(k, m);
                double *dS = A(m, m);
                /* Symmetric rank-k update of the diagonal tile A(m,m). */
#pragma omp task depend(in:dU[0:A.mb*A.mb]) depend(inout:dS[0:A.mb*A.mb])
                {
                    cblas_dsyrk(CblasColMajor,
                                (CBLAS_UPLO)PlasmaUpper,
                                (CBLAS_TRANSPOSE)PlasmaTrans,
                                ncols, A.mb,
                                (-1.0), dU, ldak,
                                ( 1.0), dS, ldam);
                }
                /* Off-diagonal tiles A(n,m) of the trailing matrix. */
                for (int n = k+1; n < m; n++) {
                    double *dL = A(k, n);
                    double *dR = A(k, m);
                    double *dC = A(n, m);
#pragma omp task depend(in:dL[0:A.mb*A.mb], dR[0:A.mb*A.mb]) depend(inout:dC[0:A.mb*A.mb])
                    cblas_dgemm(CblasColMajor,
                                (CBLAS_TRANSPOSE)PlasmaTrans,
                                (CBLAS_TRANSPOSE)PlasmaNoTrans,
                                A.mb, ncols, A.mb,
                                neg_one, dL, ldak,
                                         dR, ldak,
                                one,     dC, A.mb);
                }
            }
        }
    }
}
pagerank.c
#include <getopt.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <unistd.h> #include <omp.h> #include "mt19937p.h" #define g(x, y) (g[(y)*n+(x)]) int run_block(int n, double d, int* restrict g, double* restrict w, double* restrict wnew, int* restrict degree, int start, int count, double* restrict wlocal) { double residual = 0.0; for (int i=0; i<count; ++i) { double sum = 0.0; //do before the block for (int j=0; j<start; ++j) { //find edges pointing toward i if (g(j,i+start)) { //count out degree of j sum += w[j]/(double)degree[j]; } } // do the block for (int j=start; j<start+count; ++j) { //find edges pointing toward i if (g(j,i+start)) { //count out degree of j sum += wnew[j]/(double)degree[j]; } } // do after the block for (int j=start+count; j<n; ++j) { //find edges pointing toward i if (g(j,i+start)) { //count out degree of j sum += w[j]/(double)degree[j]; } } double newVal = ((1.0 - d)/(double)n) + (d*sum); residual += fabs(wnew[i+start] - newVal); wlocal[i] = newVal; } return residual < ((double)count)/(1000000.0 * (double)n); } /** * Pr(x) = (1-d)/n + d*sum_{n in g(n,x)}(Pr(n)/(outdegree n)) * Runs 1 iteration of pagerank * Returns 1 if done, 0 otherwise */ int run_iteration(int n, double d, int* restrict g, double* restrict w, double* restrict wnew, int* restrict degree) { int iterationDone = 1; #pragma omp parallel shared(w, wnew) reduction(&& : iterationDone) { int this_thread = omp_get_thread_num(), num_threads = omp_get_num_threads(); int start = (n/num_threads) * this_thread; int count; if (this_thread == num_threads - 1) { count = n - start; } else { count = ((n/num_threads) * (this_thread + 1)) - start; } double* wlocal = (double*)calloc(count, sizeof(double)); memcpy(wlocal, wnew+start, count * sizeof(double)); int done = 0; while (!done) { done = run_block(n, d, g, w, wnew, degree, start, count, wlocal); memcpy(wnew+start, wlocal, count * sizeof(double)); } free(wlocal); #pragma omp 
barrier for(int i=start; i<start+count; i++){ iterationDone = iterationDone && (fabs(w[i] - wnew[i]) < 1.0/(1000.0 * (double)n)); w[i] = wnew[i]; } } return iterationDone; } /** * */ int pagerank(int n, double d, int* restrict g, double* restrict w) { int iterations = 0; double* restrict wnew = (double*) calloc(n, sizeof(double)); memcpy(wnew, w, n * sizeof(double)); //compute degree of each item prior int* restrict degree = (int*) calloc(n, sizeof(int)); for (int i=0; i<n; ++i) { int count = 0; for (int j=0; j<n; ++j) { count += g(i,j); } degree[i] = count; } for (int done = 0; !done; ) { done = run_iteration(n, d, g, w, wnew, degree); iterations++; } free(wnew); free(degree); return iterations; } /** * # The random graph model * * Of course, we need to run the shortest path algorithm on something! * For the sake of keeping things interesting, let's use a simple random graph * model to generate the input data. The $G(n,p)$ model simply includes each * possible edge with probability $p$, drops it otherwise -- doesn't get much * simpler than that. We use a thread-safe version of the Mersenne twister * random number generator in lieu of coin flips. 
*/ int* gen_graph(int n, double p) { int* g = calloc(n*n, sizeof(int)); struct mt19937p state; struct timeval time; gettimeofday(&time, NULL); sgenrand((unsigned long)time.tv_usec, &state); for (int j = 0; j < n; ++j) { for (int i = 0; i < n; ++i) g(i, j) = (genrand(&state) < p); g(j, j) = 0; //no self edges } return g; } void write_matrix(const char* fname, int n, int* g) { FILE* fp = fopen(fname, "w+"); if (fp == NULL) { fprintf(stderr, "Could not open output file: %s\n", fname); exit(-1); } for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) fprintf(fp, "%d ", g(i,j)); fprintf(fp, "\n"); } fclose(fp); } void write_weights(const char* fname, int n, double* w) { FILE* fp = fopen(fname, "w+"); if (fp == NULL) { fprintf(stderr, "Could not open output file: %s\n", fname); exit(-1); } for (int i = 0; i < n; ++i) { fprintf(fp, "%g ", w[i]); } fprintf(fp, "\n"); fclose(fp); } double checksum(const double* restrict w, int n) { double sum = 0.0; for (int i=0; i<n; ++i) { sum += w[i]; } return sum; } /** * # The `main` event */ const char* usage = "pagerank.x -- Compute pagerank on a random graph\n" "Flags:\n" " - n -- number of nodes (200)\n" " - p -- probability of including edges (0.05)\n" " - d -- probability that a user follows a link (0.85)\n" " - i -- file name where adjacency matrix should be stored (none)\n" " - o -- file name where output weights should be stored (none)\n"; int main(int argc, char** argv) { int n = 200; // Number of nodes double p = 0.05; // Edge probability double d = 0.85; // Probability a link is followed const char* ifname = NULL; // Adjacency matrix file name const char* ofname = NULL; // Distance matrix file name // Option processing extern char* optarg; const char* optstring = "hn:d:p:o:i:"; int c; while ((c = getopt(argc, argv, optstring)) != -1) { switch (c) { case 'h': fprintf(stderr, "%s", usage); return -1; case 'n': n = atoi(optarg); break; case 'p': p = atof(optarg); break; case 'd': d = atof(optarg); break; case 'o': ofname = 
optarg; break; case 'i': ifname = optarg; break; } } // Graph generation + output int* g = gen_graph(n, p); if (ifname) write_matrix(ifname, n, g); // Generate initial weights double* w = calloc(n, sizeof(double)); for (int i = 0; i < n; ++i) { w[i] = 1.0/(double)n; } // Time the pagerank code double t0 = omp_get_wtime(); int iterations = pagerank(n, d, g, w); double t1 = omp_get_wtime(); //openmp, cores, time, n, iterations, p, d, checksum printf("openmp, %d, %g, %d, %d, %g, %g, %g\n", omp_get_max_threads(), (t1-t0), n, iterations, p, d, checksum(w, n)); // Generate output file if (ofname) write_weights(ofname, n, w); // Clean up free(g); free(w); return 0; }
GB_unop__erfc_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__erfc_fp64_fp64)
// op(A') function:  GB (_unop_tran__erfc_fp64_fp64)

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = erfc (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = erfc (x) ;

// casting (no-op here: A and C are both double)
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    double aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ;                \
    Cx [pC] = erfc (z) ;            \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ERFC || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies erfc element-wise over anz entries; parallelized with a static
// schedule since every entry costs the same.

GrB_Info GB (_unop_apply__erfc_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = erfc (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = erfc (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared by all unary ops and textually
// included; it uses the GB_CAST_OP macro defined above.

GrB_Info GB (_unop_tran__erfc_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
omp_for_schedule_guided.c
<ompts:test>
<ompts:testdescription>Test which checks the guided option of the omp for schedule directive.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp for schedule(guided)</ompts:directive>
<ompts:dependences>omp flush,omp for nowait,omp critical,omp single</ompts:dependences>
<ompts:testcode>
/* Test for guided scheduling
 * Ensure threads get chunks interleavely first
 * Then judge the chunk sizes are decreasing to a stable value
 * Modified by Chunhua Liao
 * For example, 100 iteration on 2 threads, chunksize 7
 * one line for each dispatch, 0/1 means thread id
 * 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0  24
 * 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1  18
 * 0 0 0 0 0 0 0 0 0 0 0 0 0 0  14
 * 1 1 1 1 1 1 1 1 1 1  10
 * 0 0 0 0 0 0 0 0  8
 * 1 1 1 1 1 1 1  7
 * 0 0 0 0 0 0 0  7
 * 1 1 1 1 1 1 1  7
 * 0 0 0 0 0  5
 */
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"

#define NUMBER_OF_THREADS 10
#define CFSMAX_SIZE 1000
#define MAX_TIME 0.005

#ifdef SLEEPTIME
#undef SLEEPTIME
#define SLEEPTIME 0.0001
#endif

int <ompts:testcode:functionname>omp_for_schedule_guided</ompts:testcode:functionname> (FILE * logFile)
{
    <ompts:orphan:vars>
    /* tids[j] records which thread executed iteration j; chunksizes holds the
     * per-dispatch chunk lengths reconstructed from tids afterwards. */
    int * tids;
    int * chunksizes;
    int notout;   /* while set, workers stall so dispatch order is observable */
    int maxiter;  /* highest iteration index handed out so far */
    </ompts:orphan:vars>
    int threads;
    int i;
    int result;

    /* one extra slot: tids[CFSMAX_SIZE] is a -1 sentinel used below */
    tids = (int *) malloc (sizeof (int) * (CFSMAX_SIZE + 1));
    maxiter = 0;
    result = 1;
    notout = 1;

    /* Testing if enough threads are available for this check. */
#pragma omp parallel
    {
#pragma omp single
        {
            threads = omp_get_num_threads ();
        } /* end of single */
    } /* end of parallel */

    if (threads < 2)
    {
        printf ("This test only works with at least two threads .\n");
        fprintf (logFile, "This test only works with at least two threads. Available were only %d thread(s).\n", threads);
        return (0);
    } /* end if */

    /* Now the real parallel work:
     * Each thread will start immediately with the first chunk.
     */
#pragma omp parallel shared(tids,maxiter)
    { /* begin of parallel */
        <ompts:orphan>
        double count;
        int tid;
        int j;

        tid = omp_get_thread_num ();

#pragma omp for nowait <ompts:check>schedule(guided)</ompts:check>
        for(j = 0; j < CFSMAX_SIZE; ++j)
        {
            count = 0.;
#pragma omp flush(maxiter)
            if (j > maxiter)
            {
#pragma omp critical
                {
                    maxiter = j;
                } /* end of critical */
            }
            /* Stall until another thread overtakes us (maxiter moves past j),
             * the global release flag notout drops, or MAX_TIME elapses; this
             * forces the dispatch pattern to be visible in tids. */
            /*printf ("thread %d sleeping\n", tid);*/
#pragma omp flush(maxiter,notout)
            while (notout && (count < MAX_TIME) && (maxiter == j))
            {
#pragma omp flush(maxiter,notout)
                my_sleep (SLEEPTIME);
                count += SLEEPTIME;
#ifdef VERBOSE
                printf(".");
#endif
            }
#ifdef VERBOSE
            if (count > 0.) printf(" waited %lf s\n", count);
#endif
            /*printf ("thread %d awake\n", tid);*/
            tids[j] = tid;
#ifdef VERBOSE
            printf("%d finished by %d\n",j,tid);
#endif
        } /* end of for */

        /* first thread out of the loop releases all stalled threads */
        notout = 0;
#pragma omp flush(maxiter,notout)
        </ompts:orphan>
    } /* end of parallel */

    /*******************************************************
     * evaluation of the values                            *
     *******************************************************/
    {
        int determined_chunksize = 1;
        int last_threadnr = tids[0];
        int global_chunknr = 0;
        int local_chunknr[NUMBER_OF_THREADS];
        int openwork = CFSMAX_SIZE;
        int expected_chunk_size;
        double c = 1;

        for (i = 0; i < NUMBER_OF_THREADS; i++)
            local_chunknr[i] = 0;

        /* sentinel guarantees the final chunk is closed by the loops below */
        tids[CFSMAX_SIZE] = -1;

        /*
         * determine the number of global chunks
         */
        /*fprintf(logFile,"# global_chunknr thread local_chunknr chunksize\n"); */
        for(i = 1; i <= CFSMAX_SIZE; ++i)
        {
            if (last_threadnr==tids[i])
            {
                determined_chunksize++;
            }
            else
            {
                /* fprintf (logFile, "%d\t%d\t%d\t%d\n", global_chunknr,last_threadnr, local_chunknr[last_threadnr], m); */
                global_chunknr++;
                local_chunknr[last_threadnr]++;
                last_threadnr = tids[i];
                determined_chunksize = 1;
            }
        }
        /* now allocate the memory for saving the sizes of the global chunks */
        chunksizes = (int*)malloc(global_chunknr * sizeof(int));

        /*
         * Evaluate the sizes of the global chunks
         */
        global_chunknr = 0;
        determined_chunksize = 1;
        last_threadnr = tids[0];
        for (i = 1; i <= CFSMAX_SIZE; ++i)
        {
            /* If the threadnumber was the same as before increase the
             * detected chunksize for this chunk otherwise set the detected
             * chunksize again to one and save the number of the next
             * thread in last_threadnr.
             */
            if (last_threadnr == tids[i])
            {
                determined_chunksize++;
            }
            else
            {
                chunksizes[global_chunknr] = determined_chunksize;
                global_chunknr++;
                local_chunknr[last_threadnr]++;
                last_threadnr = tids[i];
                determined_chunksize = 1;
            }
        }

#ifdef VERBOSE
        fprintf (logFile, "found\texpected\tconstant\n");
#endif

        /* identify the constant c for the exponential decrease of the chunksize */
        expected_chunk_size = openwork / threads;
        c = (double) chunksizes[0] / expected_chunk_size;

        for (i = 0; i < global_chunknr; i++)
        {
            /* calculate the new expected chunksize */
            if (expected_chunk_size > 1)
                expected_chunk_size = c * openwork / threads;
#ifdef VERBOSE
            fprintf (logFile, "%8d\t%8d\t%lf\n", chunksizes[i], expected_chunk_size, c * chunksizes[i]/expected_chunk_size);
#endif
            /* check if chunksize is inside the rounding errors */
            if (abs (chunksizes[i] - expected_chunk_size) >= 2)
            {
                result = 0;
#ifndef VERBOSE
                fprintf (logFile, "Chunksize differed from expected value: %d instead of %d\n", chunksizes[i], expected_chunk_size);
                return 0;
#endif
            } /* end if */
#ifndef VERBOSE
            if (expected_chunk_size - chunksizes[i] < 0 )
                fprintf (logFile, "Chunksize did not decrease: %d instead of %d\n", chunksizes[i],expected_chunk_size);
#endif
            /* calculating the remaining amount of work */
            openwork -= chunksizes[i];
        }
    }
    return result;
}
</ompts:testcode>
</ompts:test>
x_solve.c
//-------------------------------------------------------------------------// // // // This benchmark is a serial C version of the NPB BT code. This C // // version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the serial Fortran versions in // // "NPB3.3-SER" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this C version to cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// #include "header.h" #include "work_lhs.h" //#include "timers.h" //--------------------------------------------------------------------- // // Performs line solves in X direction by first factoring // the block-tridiagonal matrix into an upper triangular matrix, // and then performing back substitution to solve for the unknow // vectors of each line. // // Make sure we treat elements zero to cell_size in the direction // of the sweep. 
// //--------------------------------------------------------------------- void x_solve() { int i, j, k, m, n, isize, z; // double pivot, coeff; int gp22, gp12; // double temp1, temp2, temp3; double fjacX[5][5][PROBLEM_SIZE+1][JMAXP-1][KMAX-1]; double njacX[5][5][PROBLEM_SIZE+1][JMAXP-1][KMAX-1]; double lhsX[5][5][3][PROBLEM_SIZE][JMAXP-1][KMAX-1]; double pivot,coeff,temp1, temp2, temp3; gp22 = grid_points[2]-2; gp12 = grid_points[1]-2; //--------------------------------------------------------------------- // This function computes the left hand side in the xi-direction //--------------------------------------------------------------------- isize = grid_points[0]-1; //--------------------------------------------------------------------- // determine a (labeled f) and n jacobians //--------------------------------------------------------------------- #pragma omp target data map(alloc:fjacX[:][:][:][:][:],njacX[:][:][:][:][:],lhsX[:][:][:][:][:][:]) //present(rho_i,u,qs,rhs,square) { #pragma omp target teams distribute parallel for collapse(2) private(temp1,temp2,temp3,i,j,k) for (i = 0; i <= isize; i++) { for (j = 1; j <= gp12; j++) { for (k = 1; k <= gp22; k++) { temp1 = rho_i[k][j][i]; temp2 = temp1 * temp1; temp3 = temp1 * temp2; //------------------------------------------------------------------- // //------------------------------------------------------------------- fjacX[0][0][i][j][k] = 0.0; fjacX[0][1][i][j][k] = 1.0; fjacX[0][2][i][j][k] = 0.0; fjacX[0][3][i][j][k] = 0.0; fjacX[0][4][i][j][k] = 0.0; fjacX[1][0][i][j][k] = -(u[k][j][i][1] * temp2 * u[k][j][i][1]) + c2 * qs[k][j][i]; fjacX[1][1][i][j][k] = ( 2.0 - c2 ) * ( u[k][j][i][1] / u[k][j][i][0] ); fjacX[1][2][i][j][k] = - c2 * ( u[k][j][i][2] * temp1 ); fjacX[1][3][i][j][k] = - c2 * ( u[k][j][i][3] * temp1 ); fjacX[1][4][i][j][k] = c2; fjacX[2][0][i][j][k] = - ( u[k][j][i][1]*u[k][j][i][2] ) * temp2; fjacX[2][1][i][j][k] = u[k][j][i][2] * temp1; fjacX[2][2][i][j][k] = u[k][j][i][1] * temp1; 
fjacX[2][3][i][j][k] = 0.0; fjacX[2][4][i][j][k] = 0.0; fjacX[3][0][i][j][k] = - ( u[k][j][i][1]*u[k][j][i][3] ) * temp2; fjacX[3][1][i][j][k] = u[k][j][i][3] * temp1; fjacX[3][2][i][j][k] = 0.0; fjacX[3][3][i][j][k] = u[k][j][i][1] * temp1; fjacX[3][4][i][j][k] = 0.0; fjacX[4][0][i][j][k] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] ) * ( u[k][j][i][1] * temp2 ); fjacX[4][1][i][j][k] = c1 * u[k][j][i][4] * temp1 - c2 * ( u[k][j][i][1]*u[k][j][i][1] * temp2 + qs[k][j][i] ); fjacX[4][2][i][j][k] = - c2 * ( u[k][j][i][2]*u[k][j][i][1] ) * temp2; fjacX[4][3][i][j][k] = - c2 * ( u[k][j][i][3]*u[k][j][i][1] ) * temp2; fjacX[4][4][i][j][k] = c1 * ( u[k][j][i][1] * temp1 ); njacX[0][0][i][j][k] = 0.0; njacX[0][1][i][j][k] = 0.0; njacX[0][2][i][j][k] = 0.0; njacX[0][3][i][j][k] = 0.0; njacX[0][4][i][j][k] = 0.0; njacX[1][0][i][j][k] = - con43 * c3c4 * temp2 * u[k][j][i][1]; njacX[1][1][i][j][k] = con43 * c3c4 * temp1; njacX[1][2][i][j][k] = 0.0; njacX[1][3][i][j][k] = 0.0; njacX[1][4][i][j][k] = 0.0; njacX[2][0][i][j][k] = - c3c4 * temp2 * u[k][j][i][2]; njacX[2][1][i][j][k] = 0.0; njacX[2][2][i][j][k] = c3c4 * temp1; njacX[2][3][i][j][k] = 0.0; njacX[2][4][i][j][k] = 0.0; njacX[3][0][i][j][k] = - c3c4 * temp2 * u[k][j][i][3]; njacX[3][1][i][j][k] = 0.0; njacX[3][2][i][j][k] = 0.0; njacX[3][3][i][j][k] = c3c4 * temp1; njacX[3][4][i][j][k] = 0.0; njacX[4][0][i][j][k] = - ( con43 * c3c4 - c1345 ) * temp3 * (u[k][j][i][1]*u[k][j][i][1]) - ( c3c4 - c1345 ) * temp3 * (u[k][j][i][2]*u[k][j][i][2]) - ( c3c4 - c1345 ) * temp3 * (u[k][j][i][3]*u[k][j][i][3]) - c1345 * temp2 * u[k][j][i][4]; njacX[4][1][i][j][k] = ( con43 * c3c4 - c1345 ) * temp2 * u[k][j][i][1]; njacX[4][2][i][j][k] = ( c3c4 - c1345 ) * temp2 * u[k][j][i][2]; njacX[4][3][i][j][k] = ( c3c4 - c1345 ) * temp2 * u[k][j][i][3]; njacX[4][4][i][j][k] = ( c1345 ) * temp1; } } } //--------------------------------------------------------------------- // now jacobians set, so form left hand side in x direction 
//--------------------------------------------------------------------- // lhsX[k][j]init(lhsX[k][j], isize); // zero the whole left hand side for starters #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for collapse(3) private(k,j,n,m) #else #pragma omp target teams distribute parallel for simd collapse(4) #endif for (k = 1; k <= gp22; k++) { for (j = 1; j <= gp12; j++) { for (n = 0; n < 5; n++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (m = 0; m < 5; m++){ lhsX[m][n][0][0][j][k] = 0.0; lhsX[m][n][1][0][j][k] = 0.0; lhsX[m][n][2][0][j][k] = 0.0; lhsX[m][n][0][isize][j][k] = 0.0; lhsX[m][n][1][isize][j][k] = 0.0; lhsX[m][n][2][isize][j][k] = 0.0; } } } } // next, set all diagonal values to 1. This is overkill, but convenient #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(k,j) // collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(2) #endif for (k = 1; k <= gp22; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (j = 1; j <= gp12; j++) { lhsX[0][0][1][0][j][k] = 1.0; lhsX[0][0][1][isize][j][k] = 1.0; lhsX[1][1][1][0][j][k] = 1.0; lhsX[1][1][1][isize][j][k] = 1.0; lhsX[2][2][1][0][j][k] = 1.0; lhsX[2][2][1][isize][j][k] = 1.0; lhsX[3][3][1][0][j][k] = 1.0; lhsX[3][3][1][isize][j][k] = 1.0; lhsX[4][4][1][0][j][k] = 1.0; lhsX[4][4][1][isize][j][k] = 1.0; } } #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for collapse(2) private(j,k) #else #pragma omp target teams distribute parallel for simd collapse(3) private(temp1,temp2) #endif for (i = 1; i <= isize-1; i++) { for (j = 1; j <= gp12; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(temp1, temp2) #endif for (k = 1; k <= gp22; k++) { temp1 = dt * tx1; temp2 = dt * tx2; lhsX[0][0][AA][i][j][k] = - temp2 * fjacX[0][0][i-1][j][k] - temp1 * njacX[0][0][i-1][j][k] - temp1 * dx1; lhsX[0][1][AA][i][j][k] = - temp2 * fjacX[0][1][i-1][j][k] - temp1 * njacX[0][1][i-1][j][k]; 
lhsX[0][2][AA][i][j][k] = - temp2 * fjacX[0][2][i-1][j][k] - temp1 * njacX[0][2][i-1][j][k]; lhsX[0][3][AA][i][j][k] = - temp2 * fjacX[0][3][i-1][j][k] - temp1 * njacX[0][3][i-1][j][k]; lhsX[0][4][AA][i][j][k] = - temp2 * fjacX[0][4][i-1][j][k] - temp1 * njacX[0][4][i-1][j][k]; lhsX[1][0][AA][i][j][k] = - temp2 * fjacX[1][0][i-1][j][k] - temp1 * njacX[1][0][i-1][j][k]; lhsX[1][1][AA][i][j][k] = - temp2 * fjacX[1][1][i-1][j][k] - temp1 * njacX[1][1][i-1][j][k] - temp1 * dx2; lhsX[1][2][AA][i][j][k] = - temp2 * fjacX[1][2][i-1][j][k] - temp1 * njacX[1][2][i-1][j][k]; lhsX[1][3][AA][i][j][k] = - temp2 * fjacX[1][3][i-1][j][k] - temp1 * njacX[1][3][i-1][j][k]; lhsX[1][4][AA][i][j][k] = - temp2 * fjacX[1][4][i-1][j][k] - temp1 * njacX[1][4][i-1][j][k]; lhsX[2][0][AA][i][j][k] = - temp2 * fjacX[2][0][i-1][j][k] - temp1 * njacX[2][0][i-1][j][k]; lhsX[2][1][AA][i][j][k] = - temp2 * fjacX[2][1][i-1][j][k] - temp1 * njacX[2][1][i-1][j][k]; lhsX[2][2][AA][i][j][k] = - temp2 * fjacX[2][2][i-1][j][k] - temp1 * njacX[2][2][i-1][j][k] - temp1 * dx3; lhsX[2][3][AA][i][j][k] = - temp2 * fjacX[2][3][i-1][j][k] - temp1 * njacX[2][3][i-1][j][k]; lhsX[2][4][AA][i][j][k] = - temp2 * fjacX[2][4][i-1][j][k] - temp1 * njacX[2][4][i-1][j][k]; lhsX[3][0][AA][i][j][k] = - temp2 * fjacX[3][0][i-1][j][k] - temp1 * njacX[3][0][i-1][j][k]; lhsX[3][1][AA][i][j][k] = - temp2 * fjacX[3][1][i-1][j][k] - temp1 * njacX[3][1][i-1][j][k]; lhsX[3][2][AA][i][j][k] = - temp2 * fjacX[3][2][i-1][j][k] - temp1 * njacX[3][2][i-1][j][k]; lhsX[3][3][AA][i][j][k] = - temp2 * fjacX[3][3][i-1][j][k] - temp1 * njacX[3][3][i-1][j][k] - temp1 * dx4; lhsX[3][4][AA][i][j][k] = - temp2 * fjacX[3][4][i-1][j][k] - temp1 * njacX[3][4][i-1][j][k]; lhsX[4][0][AA][i][j][k] = - temp2 * fjacX[4][0][i-1][j][k] - temp1 * njacX[4][0][i-1][j][k]; lhsX[4][1][AA][i][j][k] = - temp2 * fjacX[4][1][i-1][j][k] - temp1 * njacX[4][1][i-1][j][k]; lhsX[4][2][AA][i][j][k] = - temp2 * fjacX[4][2][i-1][j][k] - temp1 * njacX[4][2][i-1][j][k]; 
lhsX[4][3][AA][i][j][k] = - temp2 * fjacX[4][3][i-1][j][k] - temp1 * njacX[4][3][i-1][j][k]; lhsX[4][4][AA][i][j][k] = - temp2 * fjacX[4][4][i-1][j][k] - temp1 * njacX[4][4][i-1][j][k] - temp1 * dx5; lhsX[0][0][BB][i][j][k] = 1.0 + temp1 * 2.0 * njacX[0][0][i][j][k] + temp1 * 2.0 * dx1; lhsX[0][1][BB][i][j][k] = temp1 * 2.0 * njacX[0][1][i][j][k]; lhsX[0][2][BB][i][j][k] = temp1 * 2.0 * njacX[0][2][i][j][k]; lhsX[0][3][BB][i][j][k] = temp1 * 2.0 * njacX[0][3][i][j][k]; lhsX[0][4][BB][i][j][k] = temp1 * 2.0 * njacX[0][4][i][j][k]; lhsX[1][0][BB][i][j][k] = temp1 * 2.0 * njacX[1][0][i][j][k]; lhsX[1][1][BB][i][j][k] = 1.0 + temp1 * 2.0 * njacX[1][1][i][j][k] + temp1 * 2.0 * dx2; lhsX[1][2][BB][i][j][k] = temp1 * 2.0 * njacX[1][2][i][j][k]; lhsX[1][3][BB][i][j][k] = temp1 * 2.0 * njacX[1][3][i][j][k]; lhsX[1][4][BB][i][j][k] = temp1 * 2.0 * njacX[1][4][i][j][k]; lhsX[2][0][BB][i][j][k] = temp1 * 2.0 * njacX[2][0][i][j][k]; lhsX[2][1][BB][i][j][k] = temp1 * 2.0 * njacX[2][1][i][j][k]; lhsX[2][2][BB][i][j][k] = 1.0 + temp1 * 2.0 * njacX[2][2][i][j][k] + temp1 * 2.0 * dx3; lhsX[2][3][BB][i][j][k] = temp1 * 2.0 * njacX[2][3][i][j][k]; lhsX[2][4][BB][i][j][k] = temp1 * 2.0 * njacX[2][4][i][j][k]; lhsX[3][0][BB][i][j][k] = temp1 * 2.0 * njacX[3][0][i][j][k]; lhsX[3][1][BB][i][j][k] = temp1 * 2.0 * njacX[3][1][i][j][k]; lhsX[3][2][BB][i][j][k] = temp1 * 2.0 * njacX[3][2][i][j][k]; lhsX[3][3][BB][i][j][k] = 1.0 + temp1 * 2.0 * njacX[3][3][i][j][k] + temp1 * 2.0 * dx4; lhsX[3][4][BB][i][j][k] = temp1 * 2.0 * njacX[3][4][i][j][k]; lhsX[4][0][BB][i][j][k] = temp1 * 2.0 * njacX[4][0][i][j][k]; lhsX[4][1][BB][i][j][k] = temp1 * 2.0 * njacX[4][1][i][j][k]; lhsX[4][2][BB][i][j][k] = temp1 * 2.0 * njacX[4][2][i][j][k]; lhsX[4][3][BB][i][j][k] = temp1 * 2.0 * njacX[4][3][i][j][k]; lhsX[4][4][BB][i][j][k] = 1.0 + temp1 * 2.0 * njacX[4][4][i][j][k] + temp1 * 2.0 * dx5; lhsX[0][0][CC][i][j][k] = temp2 * fjacX[0][0][i+1][j][k] - temp1 * njacX[0][0][i+1][j][k] - temp1 * dx1; 
lhsX[0][1][CC][i][j][k] = temp2 * fjacX[0][1][i+1][j][k] - temp1 * njacX[0][1][i+1][j][k]; lhsX[0][2][CC][i][j][k] = temp2 * fjacX[0][2][i+1][j][k] - temp1 * njacX[0][2][i+1][j][k]; lhsX[0][3][CC][i][j][k] = temp2 * fjacX[0][3][i+1][j][k] - temp1 * njacX[0][3][i+1][j][k]; lhsX[0][4][CC][i][j][k] = temp2 * fjacX[0][4][i+1][j][k] - temp1 * njacX[0][4][i+1][j][k]; lhsX[1][0][CC][i][j][k] = temp2 * fjacX[1][0][i+1][j][k] - temp1 * njacX[1][0][i+1][j][k]; lhsX[1][1][CC][i][j][k] = temp2 * fjacX[1][1][i+1][j][k] - temp1 * njacX[1][1][i+1][j][k] - temp1 * dx2; lhsX[1][2][CC][i][j][k] = temp2 * fjacX[1][2][i+1][j][k] - temp1 * njacX[1][2][i+1][j][k]; lhsX[1][3][CC][i][j][k] = temp2 * fjacX[1][3][i+1][j][k] - temp1 * njacX[1][3][i+1][j][k]; lhsX[1][4][CC][i][j][k] = temp2 * fjacX[1][4][i+1][j][k] - temp1 * njacX[1][4][i+1][j][k]; lhsX[2][0][CC][i][j][k] = temp2 * fjacX[2][0][i+1][j][k] - temp1 * njacX[2][0][i+1][j][k]; lhsX[2][1][CC][i][j][k] = temp2 * fjacX[2][1][i+1][j][k] - temp1 * njacX[2][1][i+1][j][k]; lhsX[2][2][CC][i][j][k] = temp2 * fjacX[2][2][i+1][j][k] - temp1 * njacX[2][2][i+1][j][k] - temp1 * dx3; lhsX[2][3][CC][i][j][k] = temp2 * fjacX[2][3][i+1][j][k] - temp1 * njacX[2][3][i+1][j][k]; lhsX[2][4][CC][i][j][k] = temp2 * fjacX[2][4][i+1][j][k] - temp1 * njacX[2][4][i+1][j][k]; lhsX[3][0][CC][i][j][k] = temp2 * fjacX[3][0][i+1][j][k] - temp1 * njacX[3][0][i+1][j][k]; lhsX[3][1][CC][i][j][k] = temp2 * fjacX[3][1][i+1][j][k] - temp1 * njacX[3][1][i+1][j][k]; lhsX[3][2][CC][i][j][k] = temp2 * fjacX[3][2][i+1][j][k] - temp1 * njacX[3][2][i+1][j][k]; lhsX[3][3][CC][i][j][k] = temp2 * fjacX[3][3][i+1][j][k] - temp1 * njacX[3][3][i+1][j][k] - temp1 * dx4; lhsX[3][4][CC][i][j][k] = temp2 * fjacX[3][4][i+1][j][k] - temp1 * njacX[3][4][i+1][j][k]; lhsX[4][0][CC][i][j][k] = temp2 * fjacX[4][0][i+1][j][k] - temp1 * njacX[4][0][i+1][j][k]; lhsX[4][1][CC][i][j][k] = temp2 * fjacX[4][1][i+1][j][k] - temp1 * njacX[4][1][i+1][j][k]; lhsX[4][2][CC][i][j][k] = temp2 * 
fjacX[4][2][i+1][j][k] - temp1 * njacX[4][2][i+1][j][k]; lhsX[4][3][CC][i][j][k] = temp2 * fjacX[4][3][i+1][j][k] - temp1 * njacX[4][3][i+1][j][k]; lhsX[4][4][CC][i][j][k] = temp2 * fjacX[4][4][i+1][j][k] - temp1 * njacX[4][4][i+1][j][k] - temp1 * dx5; } } } //--------------------------------------------------------------------- //--------------------------------------------------------------------- //--------------------------------------------------------------------- // performs guaussian elimination on this cell. // // assumes that unpacking routines for non-first cells // preload C' and rhs' from previous cell. // // assumed send happens outside this routine, but that // c'(IMAX) and rhs'(IMAX) will be sent to next cell //--------------------------------------------------------------------- //--------------------------------------------------------------------- // outer most do loops - sweeping in i direction //--------------------------------------------------------------------- //--------------------------------------------------------------------- // multiply c[k][j][0] by b_inverse and copy back to c // multiply rhs(0) by b_inverse(0) and copy to rhs //--------------------------------------------------------------------- //binvcrhs( lhsX[0][j][BB], lhsX[k][0][j][k][CC], rhs[k][j][0] ); #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(j,k,pivot, coeff) #else #pragma omp target teams distribute parallel for simd private(pivot, coeff) collapse(2) #endif for (j = 1; j <= gp12; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(pivot, coeff) #endif for (k = 1; k <= gp22; k++) { /* for(m = 0; m < 5; m++){ pivot = 1.00/lhsX[m][m][BB][0][j][k]; for(n = m+1; n < 5; n++){ lhsX[m][n][BB][0][j][k] = lhsX[m][n][BB][0][j][k]*pivot; } lhsX[m][0][CC][0][j][k] = lhsX[m][0][CC][0][j][k]*pivot; lhsX[m][1][CC][0][j][k] = lhsX[m][1][CC][0][j][k]*pivot; lhsX[m][2][CC][0][j][k] = lhsX[m][2][CC][0][j][k]*pivot; lhsX[m][3][CC][0][j][k] = 
lhsX[m][3][CC][0][j][k]*pivot; lhsX[m][4][CC][0][j][k] = lhsX[m][4][CC][0][j][k]*pivot; rhs[k][j][0][m] = rhs[k][j][0][m]*pivot; for(n = 0; n < 5; n++){ if(n != m){ coeff = lhsX[n][m][BB][0][j][k]; for(z = m+1; z < 5; z++){ lhsX[n][z][BB][0][j][k] = lhsX[n][z][BB][0][j][k] - coeff*lhsX[m][z][BB][0][j][k]; } lhsX[n][0][CC][0][j][k] = lhsX[n][0][CC][0][j][k] - coeff*lhsX[m][0][CC][0][j][k]; lhsX[n][1][CC][0][j][k] = lhsX[n][1][CC][0][j][k] - coeff*lhsX[m][1][CC][0][j][k]; lhsX[n][2][CC][0][j][k] = lhsX[n][2][CC][0][j][k] - coeff*lhsX[m][2][CC][0][j][k]; lhsX[n][3][CC][0][j][k] = lhsX[n][3][CC][0][j][k] - coeff*lhsX[m][3][CC][0][j][k]; lhsX[n][4][CC][0][j][k] = lhsX[n][4][CC][0][j][k] - coeff*lhsX[m][4][CC][0][j][k]; rhs[k][j][0][n] = rhs[k][j][0][n] - coeff*rhs[k][j][0][m]; } } } */ pivot = 1.00/lhsX[0][0][BB][0][j][k]; lhsX[0][1][BB][0][j][k] = lhsX[0][1][BB][0][j][k]*pivot; lhsX[0][2][BB][0][j][k] = lhsX[0][2][BB][0][j][k]*pivot; lhsX[0][3][BB][0][j][k] = lhsX[0][3][BB][0][j][k]*pivot; lhsX[0][4][BB][0][j][k] = lhsX[0][4][BB][0][j][k]*pivot; lhsX[0][0][CC][0][j][k] = lhsX[0][0][CC][0][j][k]*pivot; lhsX[0][1][CC][0][j][k] = lhsX[0][1][CC][0][j][k]*pivot; lhsX[0][2][CC][0][j][k] = lhsX[0][2][CC][0][j][k]*pivot; lhsX[0][3][CC][0][j][k] = lhsX[0][3][CC][0][j][k]*pivot; lhsX[0][4][CC][0][j][k] = lhsX[0][4][CC][0][j][k]*pivot; rhs[k][j][0][0] = rhs[k][j][0][0] *pivot; coeff = lhsX[1][0][BB][0][j][k]; lhsX[1][1][BB][0][j][k]= lhsX[1][1][BB][0][j][k] - coeff*lhsX[0][1][BB][0][j][k]; lhsX[1][2][BB][0][j][k]= lhsX[1][2][BB][0][j][k] - coeff*lhsX[0][2][BB][0][j][k]; lhsX[1][3][BB][0][j][k]= lhsX[1][3][BB][0][j][k] - coeff*lhsX[0][3][BB][0][j][k]; lhsX[1][4][BB][0][j][k]= lhsX[1][4][BB][0][j][k] - coeff*lhsX[0][4][BB][0][j][k]; lhsX[1][0][CC][0][j][k] = lhsX[1][0][CC][0][j][k] - coeff*lhsX[0][0][CC][0][j][k]; lhsX[1][1][CC][0][j][k] = lhsX[1][1][CC][0][j][k] - coeff*lhsX[0][1][CC][0][j][k]; lhsX[1][2][CC][0][j][k] = lhsX[1][2][CC][0][j][k] - coeff*lhsX[0][2][CC][0][j][k]; 
lhsX[1][3][CC][0][j][k] = lhsX[1][3][CC][0][j][k] - coeff*lhsX[0][3][CC][0][j][k]; lhsX[1][4][CC][0][j][k] = lhsX[1][4][CC][0][j][k] - coeff*lhsX[0][4][CC][0][j][k]; rhs[k][j][0][1] = rhs[k][j][0][1] - coeff*rhs[k][j][0][0]; coeff = lhsX[2][0][BB][0][j][k]; lhsX[2][1][BB][0][j][k]= lhsX[2][1][BB][0][j][k] - coeff*lhsX[0][1][BB][0][j][k]; lhsX[2][2][BB][0][j][k]= lhsX[2][2][BB][0][j][k] - coeff*lhsX[0][2][BB][0][j][k]; lhsX[2][3][BB][0][j][k]= lhsX[2][3][BB][0][j][k] - coeff*lhsX[0][3][BB][0][j][k]; lhsX[2][4][BB][0][j][k]= lhsX[2][4][BB][0][j][k] - coeff*lhsX[0][4][BB][0][j][k]; lhsX[2][0][CC][0][j][k] = lhsX[2][0][CC][0][j][k] - coeff*lhsX[0][0][CC][0][j][k]; lhsX[2][1][CC][0][j][k] = lhsX[2][1][CC][0][j][k] - coeff*lhsX[0][1][CC][0][j][k]; lhsX[2][2][CC][0][j][k] = lhsX[2][2][CC][0][j][k] - coeff*lhsX[0][2][CC][0][j][k]; lhsX[2][3][CC][0][j][k] = lhsX[2][3][CC][0][j][k] - coeff*lhsX[0][3][CC][0][j][k]; lhsX[2][4][CC][0][j][k] = lhsX[2][4][CC][0][j][k] - coeff*lhsX[0][4][CC][0][j][k]; rhs[k][j][0][2] = rhs[k][j][0][2] - coeff*rhs[k][j][0][0]; coeff = lhsX[3][0][BB][0][j][k]; lhsX[3][1][BB][0][j][k]= lhsX[3][1][BB][0][j][k] - coeff*lhsX[0][1][BB][0][j][k]; lhsX[3][2][BB][0][j][k]= lhsX[3][2][BB][0][j][k] - coeff*lhsX[0][2][BB][0][j][k]; lhsX[3][3][BB][0][j][k]= lhsX[3][3][BB][0][j][k] - coeff*lhsX[0][3][BB][0][j][k]; lhsX[3][4][BB][0][j][k]= lhsX[3][4][BB][0][j][k] - coeff*lhsX[0][4][BB][0][j][k]; lhsX[3][0][CC][0][j][k] = lhsX[3][0][CC][0][j][k] - coeff*lhsX[0][0][CC][0][j][k]; lhsX[3][1][CC][0][j][k] = lhsX[3][1][CC][0][j][k] - coeff*lhsX[0][1][CC][0][j][k]; lhsX[3][2][CC][0][j][k] = lhsX[3][2][CC][0][j][k] - coeff*lhsX[0][2][CC][0][j][k]; lhsX[3][3][CC][0][j][k] = lhsX[3][3][CC][0][j][k] - coeff*lhsX[0][3][CC][0][j][k]; lhsX[3][4][CC][0][j][k] = lhsX[3][4][CC][0][j][k] - coeff*lhsX[0][4][CC][0][j][k]; rhs[k][j][0][3] = rhs[k][j][0][3] - coeff*rhs[k][j][0][0]; coeff = lhsX[4][0][BB][0][j][k]; lhsX[4][1][BB][0][j][k]= lhsX[4][1][BB][0][j][k] - 
coeff*lhsX[0][1][BB][0][j][k]; lhsX[4][2][BB][0][j][k]= lhsX[4][2][BB][0][j][k] - coeff*lhsX[0][2][BB][0][j][k]; lhsX[4][3][BB][0][j][k]= lhsX[4][3][BB][0][j][k] - coeff*lhsX[0][3][BB][0][j][k]; lhsX[4][4][BB][0][j][k]= lhsX[4][4][BB][0][j][k] - coeff*lhsX[0][4][BB][0][j][k]; lhsX[4][0][CC][0][j][k] = lhsX[4][0][CC][0][j][k] - coeff*lhsX[0][0][CC][0][j][k]; lhsX[4][1][CC][0][j][k] = lhsX[4][1][CC][0][j][k] - coeff*lhsX[0][1][CC][0][j][k]; lhsX[4][2][CC][0][j][k] = lhsX[4][2][CC][0][j][k] - coeff*lhsX[0][2][CC][0][j][k]; lhsX[4][3][CC][0][j][k] = lhsX[4][3][CC][0][j][k] - coeff*lhsX[0][3][CC][0][j][k]; lhsX[4][4][CC][0][j][k] = lhsX[4][4][CC][0][j][k] - coeff*lhsX[0][4][CC][0][j][k]; rhs[k][j][0][4] = rhs[k][j][0][4] - coeff*rhs[k][j][0][0]; pivot = 1.00/lhsX[1][1][BB][0][j][k]; lhsX[1][2][BB][0][j][k] = lhsX[1][2][BB][0][j][k]*pivot; lhsX[1][3][BB][0][j][k] = lhsX[1][3][BB][0][j][k]*pivot; lhsX[1][4][BB][0][j][k] = lhsX[1][4][BB][0][j][k]*pivot; lhsX[1][0][CC][0][j][k] = lhsX[1][0][CC][0][j][k]*pivot; lhsX[1][1][CC][0][j][k] = lhsX[1][1][CC][0][j][k]*pivot; lhsX[1][2][CC][0][j][k] = lhsX[1][2][CC][0][j][k]*pivot; lhsX[1][3][CC][0][j][k] = lhsX[1][3][CC][0][j][k]*pivot; lhsX[1][4][CC][0][j][k] = lhsX[1][4][CC][0][j][k]*pivot; rhs[k][j][0][1] = rhs[k][j][0][1] *pivot; coeff = lhsX[0][1][BB][0][j][k]; lhsX[0][2][BB][0][j][k]= lhsX[0][2][BB][0][j][k] - coeff*lhsX[1][2][BB][0][j][k]; lhsX[0][3][BB][0][j][k]= lhsX[0][3][BB][0][j][k] - coeff*lhsX[1][3][BB][0][j][k]; lhsX[0][4][BB][0][j][k]= lhsX[0][4][BB][0][j][k] - coeff*lhsX[1][4][BB][0][j][k]; lhsX[0][0][CC][0][j][k] = lhsX[0][0][CC][0][j][k] - coeff*lhsX[1][0][CC][0][j][k]; lhsX[0][1][CC][0][j][k] = lhsX[0][1][CC][0][j][k] - coeff*lhsX[1][1][CC][0][j][k]; lhsX[0][2][CC][0][j][k] = lhsX[0][2][CC][0][j][k] - coeff*lhsX[1][2][CC][0][j][k]; lhsX[0][3][CC][0][j][k] = lhsX[0][3][CC][0][j][k] - coeff*lhsX[1][3][CC][0][j][k]; lhsX[0][4][CC][0][j][k] = lhsX[0][4][CC][0][j][k] - coeff*lhsX[1][4][CC][0][j][k]; rhs[k][j][0][0] = 
rhs[k][j][0][0] - coeff*rhs[k][j][0][1]; coeff = lhsX[2][1][BB][0][j][k]; lhsX[2][2][BB][0][j][k]= lhsX[2][2][BB][0][j][k] - coeff*lhsX[1][2][BB][0][j][k]; lhsX[2][3][BB][0][j][k]= lhsX[2][3][BB][0][j][k] - coeff*lhsX[1][3][BB][0][j][k]; lhsX[2][4][BB][0][j][k]= lhsX[2][4][BB][0][j][k] - coeff*lhsX[1][4][BB][0][j][k]; lhsX[2][0][CC][0][j][k] = lhsX[2][0][CC][0][j][k] - coeff*lhsX[1][0][CC][0][j][k]; lhsX[2][1][CC][0][j][k] = lhsX[2][1][CC][0][j][k] - coeff*lhsX[1][1][CC][0][j][k]; lhsX[2][2][CC][0][j][k] = lhsX[2][2][CC][0][j][k] - coeff*lhsX[1][2][CC][0][j][k]; lhsX[2][3][CC][0][j][k] = lhsX[2][3][CC][0][j][k] - coeff*lhsX[1][3][CC][0][j][k]; lhsX[2][4][CC][0][j][k] = lhsX[2][4][CC][0][j][k] - coeff*lhsX[1][4][CC][0][j][k]; rhs[k][j][0][2] = rhs[k][j][0][2] - coeff*rhs[k][j][0][1]; coeff = lhsX[3][1][BB][0][j][k]; lhsX[3][2][BB][0][j][k]= lhsX[3][2][BB][0][j][k] - coeff*lhsX[1][2][BB][0][j][k]; lhsX[3][3][BB][0][j][k]= lhsX[3][3][BB][0][j][k] - coeff*lhsX[1][3][BB][0][j][k]; lhsX[3][4][BB][0][j][k]= lhsX[3][4][BB][0][j][k] - coeff*lhsX[1][4][BB][0][j][k]; lhsX[3][0][CC][0][j][k] = lhsX[3][0][CC][0][j][k] - coeff*lhsX[1][0][CC][0][j][k]; lhsX[3][1][CC][0][j][k] = lhsX[3][1][CC][0][j][k] - coeff*lhsX[1][1][CC][0][j][k]; lhsX[3][2][CC][0][j][k] = lhsX[3][2][CC][0][j][k] - coeff*lhsX[1][2][CC][0][j][k]; lhsX[3][3][CC][0][j][k] = lhsX[3][3][CC][0][j][k] - coeff*lhsX[1][3][CC][0][j][k]; lhsX[3][4][CC][0][j][k] = lhsX[3][4][CC][0][j][k] - coeff*lhsX[1][4][CC][0][j][k]; rhs[k][j][0][3] = rhs[k][j][0][3] - coeff*rhs[k][j][0][1]; coeff = lhsX[4][1][BB][0][j][k]; lhsX[4][2][BB][0][j][k]= lhsX[4][2][BB][0][j][k] - coeff*lhsX[1][2][BB][0][j][k]; lhsX[4][3][BB][0][j][k]= lhsX[4][3][BB][0][j][k] - coeff*lhsX[1][3][BB][0][j][k]; lhsX[4][4][BB][0][j][k]= lhsX[4][4][BB][0][j][k] - coeff*lhsX[1][4][BB][0][j][k]; lhsX[4][0][CC][0][j][k] = lhsX[4][0][CC][0][j][k] - coeff*lhsX[1][0][CC][0][j][k]; lhsX[4][1][CC][0][j][k] = lhsX[4][1][CC][0][j][k] - coeff*lhsX[1][1][CC][0][j][k]; 
lhsX[4][2][CC][0][j][k] = lhsX[4][2][CC][0][j][k] - coeff*lhsX[1][2][CC][0][j][k]; lhsX[4][3][CC][0][j][k] = lhsX[4][3][CC][0][j][k] - coeff*lhsX[1][3][CC][0][j][k]; lhsX[4][4][CC][0][j][k] = lhsX[4][4][CC][0][j][k] - coeff*lhsX[1][4][CC][0][j][k]; rhs[k][j][0][4] = rhs[k][j][0][4] - coeff*rhs[k][j][0][1]; pivot = 1.00/lhsX[2][2][BB][0][j][k]; lhsX[2][3][BB][0][j][k] = lhsX[2][3][BB][0][j][k]*pivot; lhsX[2][4][BB][0][j][k] = lhsX[2][4][BB][0][j][k]*pivot; lhsX[2][0][CC][0][j][k] = lhsX[2][0][CC][0][j][k]*pivot; lhsX[2][1][CC][0][j][k] = lhsX[2][1][CC][0][j][k]*pivot; lhsX[2][2][CC][0][j][k] = lhsX[2][2][CC][0][j][k]*pivot; lhsX[2][3][CC][0][j][k] = lhsX[2][3][CC][0][j][k]*pivot; lhsX[2][4][CC][0][j][k] = lhsX[2][4][CC][0][j][k]*pivot; rhs[k][j][0][2] = rhs[k][j][0][2] *pivot; coeff = lhsX[0][2][BB][0][j][k]; lhsX[0][3][BB][0][j][k]= lhsX[0][3][BB][0][j][k] - coeff*lhsX[2][3][BB][0][j][k]; lhsX[0][4][BB][0][j][k]= lhsX[0][4][BB][0][j][k] - coeff*lhsX[2][4][BB][0][j][k]; lhsX[0][0][CC][0][j][k] = lhsX[0][0][CC][0][j][k] - coeff*lhsX[2][0][CC][0][j][k]; lhsX[0][1][CC][0][j][k] = lhsX[0][1][CC][0][j][k] - coeff*lhsX[2][1][CC][0][j][k]; lhsX[0][2][CC][0][j][k] = lhsX[0][2][CC][0][j][k] - coeff*lhsX[2][2][CC][0][j][k]; lhsX[0][3][CC][0][j][k] = lhsX[0][3][CC][0][j][k] - coeff*lhsX[2][3][CC][0][j][k]; lhsX[0][4][CC][0][j][k] = lhsX[0][4][CC][0][j][k] - coeff*lhsX[2][4][CC][0][j][k]; rhs[k][j][0][0] = rhs[k][j][0][0] - coeff*rhs[k][j][0][2]; coeff = lhsX[1][2][BB][0][j][k]; lhsX[1][3][BB][0][j][k]= lhsX[1][3][BB][0][j][k] - coeff*lhsX[2][3][BB][0][j][k]; lhsX[1][4][BB][0][j][k]= lhsX[1][4][BB][0][j][k] - coeff*lhsX[2][4][BB][0][j][k]; lhsX[1][0][CC][0][j][k] = lhsX[1][0][CC][0][j][k] - coeff*lhsX[2][0][CC][0][j][k]; lhsX[1][1][CC][0][j][k] = lhsX[1][1][CC][0][j][k] - coeff*lhsX[2][1][CC][0][j][k]; lhsX[1][2][CC][0][j][k] = lhsX[1][2][CC][0][j][k] - coeff*lhsX[2][2][CC][0][j][k]; lhsX[1][3][CC][0][j][k] = lhsX[1][3][CC][0][j][k] - coeff*lhsX[2][3][CC][0][j][k]; 
lhsX[1][4][CC][0][j][k] = lhsX[1][4][CC][0][j][k] - coeff*lhsX[2][4][CC][0][j][k]; rhs[k][j][0][1] = rhs[k][j][0][1] - coeff*rhs[k][j][0][2]; coeff = lhsX[3][2][BB][0][j][k]; lhsX[3][3][BB][0][j][k]= lhsX[3][3][BB][0][j][k] - coeff*lhsX[2][3][BB][0][j][k]; lhsX[3][4][BB][0][j][k]= lhsX[3][4][BB][0][j][k] - coeff*lhsX[2][4][BB][0][j][k]; lhsX[3][0][CC][0][j][k] = lhsX[3][0][CC][0][j][k] - coeff*lhsX[2][0][CC][0][j][k]; lhsX[3][1][CC][0][j][k] = lhsX[3][1][CC][0][j][k] - coeff*lhsX[2][1][CC][0][j][k]; lhsX[3][2][CC][0][j][k] = lhsX[3][2][CC][0][j][k] - coeff*lhsX[2][2][CC][0][j][k]; lhsX[3][3][CC][0][j][k] = lhsX[3][3][CC][0][j][k] - coeff*lhsX[2][3][CC][0][j][k]; lhsX[3][4][CC][0][j][k] = lhsX[3][4][CC][0][j][k] - coeff*lhsX[2][4][CC][0][j][k]; rhs[k][j][0][3] = rhs[k][j][0][3] - coeff*rhs[k][j][0][2]; coeff = lhsX[4][2][BB][0][j][k]; lhsX[4][3][BB][0][j][k]= lhsX[4][3][BB][0][j][k] - coeff*lhsX[2][3][BB][0][j][k]; lhsX[4][4][BB][0][j][k]= lhsX[4][4][BB][0][j][k] - coeff*lhsX[2][4][BB][0][j][k]; lhsX[4][0][CC][0][j][k] = lhsX[4][0][CC][0][j][k] - coeff*lhsX[2][0][CC][0][j][k]; lhsX[4][1][CC][0][j][k] = lhsX[4][1][CC][0][j][k] - coeff*lhsX[2][1][CC][0][j][k]; lhsX[4][2][CC][0][j][k] = lhsX[4][2][CC][0][j][k] - coeff*lhsX[2][2][CC][0][j][k]; lhsX[4][3][CC][0][j][k] = lhsX[4][3][CC][0][j][k] - coeff*lhsX[2][3][CC][0][j][k]; lhsX[4][4][CC][0][j][k] = lhsX[4][4][CC][0][j][k] - coeff*lhsX[2][4][CC][0][j][k]; rhs[k][j][0][4] = rhs[k][j][0][4] - coeff*rhs[k][j][0][2]; pivot = 1.00/lhsX[3][3][BB][0][j][k]; lhsX[3][4][BB][0][j][k] = lhsX[3][4][BB][0][j][k]*pivot; lhsX[3][0][CC][0][j][k] = lhsX[3][0][CC][0][j][k]*pivot; lhsX[3][1][CC][0][j][k] = lhsX[3][1][CC][0][j][k]*pivot; lhsX[3][2][CC][0][j][k] = lhsX[3][2][CC][0][j][k]*pivot; lhsX[3][3][CC][0][j][k] = lhsX[3][3][CC][0][j][k]*pivot; lhsX[3][4][CC][0][j][k] = lhsX[3][4][CC][0][j][k]*pivot; rhs[k][j][0][3] = rhs[k][j][0][3] *pivot; coeff = lhsX[0][3][BB][0][j][k]; lhsX[0][4][BB][0][j][k]= lhsX[0][4][BB][0][j][k] - 
coeff*lhsX[3][4][BB][0][j][k]; lhsX[0][0][CC][0][j][k] = lhsX[0][0][CC][0][j][k] - coeff*lhsX[3][0][CC][0][j][k]; lhsX[0][1][CC][0][j][k] = lhsX[0][1][CC][0][j][k] - coeff*lhsX[3][1][CC][0][j][k]; lhsX[0][2][CC][0][j][k] = lhsX[0][2][CC][0][j][k] - coeff*lhsX[3][2][CC][0][j][k]; lhsX[0][3][CC][0][j][k] = lhsX[0][3][CC][0][j][k] - coeff*lhsX[3][3][CC][0][j][k]; lhsX[0][4][CC][0][j][k] = lhsX[0][4][CC][0][j][k] - coeff*lhsX[3][4][CC][0][j][k]; rhs[k][j][0][0] = rhs[k][j][0][0] - coeff*rhs[k][j][0][3]; coeff = lhsX[1][3][BB][0][j][k]; lhsX[1][4][BB][0][j][k]= lhsX[1][4][BB][0][j][k] - coeff*lhsX[3][4][BB][0][j][k]; lhsX[1][0][CC][0][j][k] = lhsX[1][0][CC][0][j][k] - coeff*lhsX[3][0][CC][0][j][k]; lhsX[1][1][CC][0][j][k] = lhsX[1][1][CC][0][j][k] - coeff*lhsX[3][1][CC][0][j][k]; lhsX[1][2][CC][0][j][k] = lhsX[1][2][CC][0][j][k] - coeff*lhsX[3][2][CC][0][j][k]; lhsX[1][3][CC][0][j][k] = lhsX[1][3][CC][0][j][k] - coeff*lhsX[3][3][CC][0][j][k]; lhsX[1][4][CC][0][j][k] = lhsX[1][4][CC][0][j][k] - coeff*lhsX[3][4][CC][0][j][k]; rhs[k][j][0][1] = rhs[k][j][0][1] - coeff*rhs[k][j][0][3]; coeff = lhsX[2][3][BB][0][j][k]; lhsX[2][4][BB][0][j][k]= lhsX[2][4][BB][0][j][k] - coeff*lhsX[3][4][BB][0][j][k]; lhsX[2][0][CC][0][j][k] = lhsX[2][0][CC][0][j][k] - coeff*lhsX[3][0][CC][0][j][k]; lhsX[2][1][CC][0][j][k] = lhsX[2][1][CC][0][j][k] - coeff*lhsX[3][1][CC][0][j][k]; lhsX[2][2][CC][0][j][k] = lhsX[2][2][CC][0][j][k] - coeff*lhsX[3][2][CC][0][j][k]; lhsX[2][3][CC][0][j][k] = lhsX[2][3][CC][0][j][k] - coeff*lhsX[3][3][CC][0][j][k]; lhsX[2][4][CC][0][j][k] = lhsX[2][4][CC][0][j][k] - coeff*lhsX[3][4][CC][0][j][k]; rhs[k][j][0][2] = rhs[k][j][0][2] - coeff*rhs[k][j][0][3]; coeff = lhsX[4][3][BB][0][j][k]; lhsX[4][4][BB][0][j][k]= lhsX[4][4][BB][0][j][k] - coeff*lhsX[3][4][BB][0][j][k]; lhsX[4][0][CC][0][j][k] = lhsX[4][0][CC][0][j][k] - coeff*lhsX[3][0][CC][0][j][k]; lhsX[4][1][CC][0][j][k] = lhsX[4][1][CC][0][j][k] - coeff*lhsX[3][1][CC][0][j][k]; lhsX[4][2][CC][0][j][k] = 
lhsX[4][2][CC][0][j][k] - coeff*lhsX[3][2][CC][0][j][k]; lhsX[4][3][CC][0][j][k] = lhsX[4][3][CC][0][j][k] - coeff*lhsX[3][3][CC][0][j][k]; lhsX[4][4][CC][0][j][k] = lhsX[4][4][CC][0][j][k] - coeff*lhsX[3][4][CC][0][j][k]; rhs[k][j][0][4] = rhs[k][j][0][4] - coeff*rhs[k][j][0][3]; pivot = 1.00/lhsX[4][4][BB][0][j][k]; lhsX[4][0][CC][0][j][k] = lhsX[4][0][CC][0][j][k]*pivot; lhsX[4][1][CC][0][j][k] = lhsX[4][1][CC][0][j][k]*pivot; lhsX[4][2][CC][0][j][k] = lhsX[4][2][CC][0][j][k]*pivot; lhsX[4][3][CC][0][j][k] = lhsX[4][3][CC][0][j][k]*pivot; lhsX[4][4][CC][0][j][k] = lhsX[4][4][CC][0][j][k]*pivot; rhs[k][j][0][4] = rhs[k][j][0][4] *pivot; coeff = lhsX[0][4][BB][0][j][k]; lhsX[0][0][CC][0][j][k] = lhsX[0][0][CC][0][j][k] - coeff*lhsX[4][0][CC][0][j][k]; lhsX[0][1][CC][0][j][k] = lhsX[0][1][CC][0][j][k] - coeff*lhsX[4][1][CC][0][j][k]; lhsX[0][2][CC][0][j][k] = lhsX[0][2][CC][0][j][k] - coeff*lhsX[4][2][CC][0][j][k]; lhsX[0][3][CC][0][j][k] = lhsX[0][3][CC][0][j][k] - coeff*lhsX[4][3][CC][0][j][k]; lhsX[0][4][CC][0][j][k] = lhsX[0][4][CC][0][j][k] - coeff*lhsX[4][4][CC][0][j][k]; rhs[k][j][0][0] = rhs[k][j][0][0] - coeff*rhs[k][j][0][4]; coeff = lhsX[1][4][BB][0][j][k]; lhsX[1][0][CC][0][j][k] = lhsX[1][0][CC][0][j][k] - coeff*lhsX[4][0][CC][0][j][k]; lhsX[1][1][CC][0][j][k] = lhsX[1][1][CC][0][j][k] - coeff*lhsX[4][1][CC][0][j][k]; lhsX[1][2][CC][0][j][k] = lhsX[1][2][CC][0][j][k] - coeff*lhsX[4][2][CC][0][j][k]; lhsX[1][3][CC][0][j][k] = lhsX[1][3][CC][0][j][k] - coeff*lhsX[4][3][CC][0][j][k]; lhsX[1][4][CC][0][j][k] = lhsX[1][4][CC][0][j][k] - coeff*lhsX[4][4][CC][0][j][k]; rhs[k][j][0][1] = rhs[k][j][0][1] - coeff*rhs[k][j][0][4]; coeff = lhsX[2][4][BB][0][j][k]; lhsX[2][0][CC][0][j][k] = lhsX[2][0][CC][0][j][k] - coeff*lhsX[4][0][CC][0][j][k]; lhsX[2][1][CC][0][j][k] = lhsX[2][1][CC][0][j][k] - coeff*lhsX[4][1][CC][0][j][k]; lhsX[2][2][CC][0][j][k] = lhsX[2][2][CC][0][j][k] - coeff*lhsX[4][2][CC][0][j][k]; lhsX[2][3][CC][0][j][k] = lhsX[2][3][CC][0][j][k] - 
coeff*lhsX[4][3][CC][0][j][k]; lhsX[2][4][CC][0][j][k] = lhsX[2][4][CC][0][j][k] - coeff*lhsX[4][4][CC][0][j][k]; rhs[k][j][0][2] = rhs[k][j][0][2] - coeff*rhs[k][j][0][4]; coeff = lhsX[3][4][BB][0][j][k]; lhsX[3][0][CC][0][j][k] = lhsX[3][0][CC][0][j][k] - coeff*lhsX[4][0][CC][0][j][k]; lhsX[3][1][CC][0][j][k] = lhsX[3][1][CC][0][j][k] - coeff*lhsX[4][1][CC][0][j][k]; lhsX[3][2][CC][0][j][k] = lhsX[3][2][CC][0][j][k] - coeff*lhsX[4][2][CC][0][j][k]; lhsX[3][3][CC][0][j][k] = lhsX[3][3][CC][0][j][k] - coeff*lhsX[4][3][CC][0][j][k]; lhsX[3][4][CC][0][j][k] = lhsX[3][4][CC][0][j][k] - coeff*lhsX[4][4][CC][0][j][k]; rhs[k][j][0][3] = rhs[k][j][0][3] - coeff*rhs[k][j][0][4]; }/*end j*/ }/*end k*/ //--------------------------------------------------------------------- // begin inner most do loop // do all the elements of the cell unless last //--------------------------------------------------------------------- #pragma omp target teams distribute parallel for private(i,k) for (j = 1; j <= gp12; j++) { for (i = 1; i <= isize-1; i++) { #pragma omp simd private(pivot,coeff) for (k = 1; k <= gp22; k++) { //------------------------------------------------------------------- // rhs(i) = rhs(i) - A*rhs(i-1) //------------------------------------------------------------------- //matvec_sub(lhsX[i-1][j][AA], rhs[k][i][j][k], rhs[k][j][i]); /* for(m = 0; m < 5; m++){ rhs[k][j][i][m] = rhs[k][j][i][m] - lhsX[m][0][AA][i][j][k]*rhs[k][j][i-1][0] - lhsX[m][1][AA][i][j][k]*rhs[k][j][i-1][1] - lhsX[m][2][AA][i][j][k]*rhs[k][j][i-1][2] - lhsX[m][3][AA][i][j][k]*rhs[k][j][i-1][3] - lhsX[m][4][AA][i][j][k]*rhs[k][j][i-1][4]; } */ rhs[k][j][i][0] = rhs[k][j][i][0] - lhsX[0][0][AA][i][j][k]*rhs[k][j][i-1][0] - lhsX[0][1][AA][i][j][k]*rhs[k][j][i-1][1] - lhsX[0][2][AA][i][j][k]*rhs[k][j][i-1][2] - lhsX[0][3][AA][i][j][k]*rhs[k][j][i-1][3] - lhsX[0][4][AA][i][j][k]*rhs[k][j][i-1][4]; rhs[k][j][i][1] = rhs[k][j][i][1] - lhsX[1][0][AA][i][j][k]*rhs[k][j][i-1][0] - 
lhsX[1][1][AA][i][j][k]*rhs[k][j][i-1][1] - lhsX[1][2][AA][i][j][k]*rhs[k][j][i-1][2] - lhsX[1][3][AA][i][j][k]*rhs[k][j][i-1][3] - lhsX[1][4][AA][i][j][k]*rhs[k][j][i-1][4]; rhs[k][j][i][2] = rhs[k][j][i][2] - lhsX[2][0][AA][i][j][k]*rhs[k][j][i-1][0] - lhsX[2][1][AA][i][j][k]*rhs[k][j][i-1][1] - lhsX[2][2][AA][i][j][k]*rhs[k][j][i-1][2] - lhsX[2][3][AA][i][j][k]*rhs[k][j][i-1][3] - lhsX[2][4][AA][i][j][k]*rhs[k][j][i-1][4]; rhs[k][j][i][3] = rhs[k][j][i][3] - lhsX[3][0][AA][i][j][k]*rhs[k][j][i-1][0] - lhsX[3][1][AA][i][j][k]*rhs[k][j][i-1][1] - lhsX[3][2][AA][i][j][k]*rhs[k][j][i-1][2] - lhsX[3][3][AA][i][j][k]*rhs[k][j][i-1][3] - lhsX[3][4][AA][i][j][k]*rhs[k][j][i-1][4]; rhs[k][j][i][4] = rhs[k][j][i][4] - lhsX[4][0][AA][i][j][k]*rhs[k][j][i-1][0] - lhsX[4][1][AA][i][j][k]*rhs[k][j][i-1][1] - lhsX[4][2][AA][i][j][k]*rhs[k][j][i-1][2] - lhsX[4][3][AA][i][j][k]*rhs[k][j][i-1][3] - lhsX[4][4][AA][i][j][k]*rhs[k][j][i-1][4]; //------------------------------------------------------------------- // B(i) = B(i) - C(i-1)*A(i) //------------------------------------------------------------------- // matmul_sub(lhsX[i-1][j][AA], lhsX[k][i][j][k][CC], lhsX[k][j][i][BB]); /* for(m = 0; m < 5; m++){ for(n = 0; n < 5; n++){ lhsX[n][m][BB][i][j][k] = lhsX[n][m][BB][i][j][k] - lhsX[n][0][AA][i][j][k]*lhsX[0][m][CC][i-1][j][k] - lhsX[n][1][AA][i][j][k]*lhsX[1][m][CC][i-1][j][k] - lhsX[n][2][AA][i][j][k]*lhsX[2][m][CC][i-1][j][k] - lhsX[n][3][AA][i][j][k]*lhsX[3][m][CC][i-1][j][k] - lhsX[n][4][AA][i][j][k]*lhsX[4][m][CC][i-1][j][k]; } } */ lhsX[0][0][BB][i][j][k] = lhsX[0][0][BB][i][j][k] - lhsX[0][0][AA][i][j][k]*lhsX[0][0][CC][i-1][j][k] - lhsX[0][1][AA][i][j][k]*lhsX[1][0][CC][i-1][j][k] - lhsX[0][2][AA][i][j][k]*lhsX[2][0][CC][i-1][j][k] - lhsX[0][3][AA][i][j][k]*lhsX[3][0][CC][i-1][j][k] - lhsX[0][4][AA][i][j][k]*lhsX[4][0][CC][i-1][j][k]; lhsX[1][0][BB][i][j][k] = lhsX[1][0][BB][i][j][k] - lhsX[1][0][AA][i][j][k]*lhsX[0][0][CC][i-1][j][k] - 
lhsX[1][1][AA][i][j][k]*lhsX[1][0][CC][i-1][j][k] - lhsX[1][2][AA][i][j][k]*lhsX[2][0][CC][i-1][j][k] - lhsX[1][3][AA][i][j][k]*lhsX[3][0][CC][i-1][j][k] - lhsX[1][4][AA][i][j][k]*lhsX[4][0][CC][i-1][j][k]; lhsX[2][0][BB][i][j][k] = lhsX[2][0][BB][i][j][k] - lhsX[2][0][AA][i][j][k]*lhsX[0][0][CC][i-1][j][k] - lhsX[2][1][AA][i][j][k]*lhsX[1][0][CC][i-1][j][k] - lhsX[2][2][AA][i][j][k]*lhsX[2][0][CC][i-1][j][k] - lhsX[2][3][AA][i][j][k]*lhsX[3][0][CC][i-1][j][k] - lhsX[2][4][AA][i][j][k]*lhsX[4][0][CC][i-1][j][k]; lhsX[3][0][BB][i][j][k] = lhsX[3][0][BB][i][j][k] - lhsX[3][0][AA][i][j][k]*lhsX[0][0][CC][i-1][j][k] - lhsX[3][1][AA][i][j][k]*lhsX[1][0][CC][i-1][j][k] - lhsX[3][2][AA][i][j][k]*lhsX[2][0][CC][i-1][j][k] - lhsX[3][3][AA][i][j][k]*lhsX[3][0][CC][i-1][j][k] - lhsX[3][4][AA][i][j][k]*lhsX[4][0][CC][i-1][j][k]; lhsX[4][0][BB][i][j][k] = lhsX[4][0][BB][i][j][k] - lhsX[4][0][AA][i][j][k]*lhsX[0][0][CC][i-1][j][k] - lhsX[4][1][AA][i][j][k]*lhsX[1][0][CC][i-1][j][k] - lhsX[4][2][AA][i][j][k]*lhsX[2][0][CC][i-1][j][k] - lhsX[4][3][AA][i][j][k]*lhsX[3][0][CC][i-1][j][k] - lhsX[4][4][AA][i][j][k]*lhsX[4][0][CC][i-1][j][k]; lhsX[0][1][BB][i][j][k] = lhsX[0][1][BB][i][j][k] - lhsX[0][0][AA][i][j][k]*lhsX[0][1][CC][i-1][j][k] - lhsX[0][1][AA][i][j][k]*lhsX[1][1][CC][i-1][j][k] - lhsX[0][2][AA][i][j][k]*lhsX[2][1][CC][i-1][j][k] - lhsX[0][3][AA][i][j][k]*lhsX[3][1][CC][i-1][j][k] - lhsX[0][4][AA][i][j][k]*lhsX[4][1][CC][i-1][j][k]; lhsX[1][1][BB][i][j][k] = lhsX[1][1][BB][i][j][k] - lhsX[1][0][AA][i][j][k]*lhsX[0][1][CC][i-1][j][k] - lhsX[1][1][AA][i][j][k]*lhsX[1][1][CC][i-1][j][k] - lhsX[1][2][AA][i][j][k]*lhsX[2][1][CC][i-1][j][k] - lhsX[1][3][AA][i][j][k]*lhsX[3][1][CC][i-1][j][k] - lhsX[1][4][AA][i][j][k]*lhsX[4][1][CC][i-1][j][k]; lhsX[2][1][BB][i][j][k] = lhsX[2][1][BB][i][j][k] - lhsX[2][0][AA][i][j][k]*lhsX[0][1][CC][i-1][j][k] - lhsX[2][1][AA][i][j][k]*lhsX[1][1][CC][i-1][j][k] - lhsX[2][2][AA][i][j][k]*lhsX[2][1][CC][i-1][j][k] - 
lhsX[2][3][AA][i][j][k]*lhsX[3][1][CC][i-1][j][k] - lhsX[2][4][AA][i][j][k]*lhsX[4][1][CC][i-1][j][k]; lhsX[3][1][BB][i][j][k] = lhsX[3][1][BB][i][j][k] - lhsX[3][0][AA][i][j][k]*lhsX[0][1][CC][i-1][j][k] - lhsX[3][1][AA][i][j][k]*lhsX[1][1][CC][i-1][j][k] - lhsX[3][2][AA][i][j][k]*lhsX[2][1][CC][i-1][j][k] - lhsX[3][3][AA][i][j][k]*lhsX[3][1][CC][i-1][j][k] - lhsX[3][4][AA][i][j][k]*lhsX[4][1][CC][i-1][j][k]; lhsX[4][1][BB][i][j][k] = lhsX[4][1][BB][i][j][k] - lhsX[4][0][AA][i][j][k]*lhsX[0][1][CC][i-1][j][k] - lhsX[4][1][AA][i][j][k]*lhsX[1][1][CC][i-1][j][k] - lhsX[4][2][AA][i][j][k]*lhsX[2][1][CC][i-1][j][k] - lhsX[4][3][AA][i][j][k]*lhsX[3][1][CC][i-1][j][k] - lhsX[4][4][AA][i][j][k]*lhsX[4][1][CC][i-1][j][k]; lhsX[0][2][BB][i][j][k] = lhsX[0][2][BB][i][j][k] - lhsX[0][0][AA][i][j][k]*lhsX[0][2][CC][i-1][j][k] - lhsX[0][1][AA][i][j][k]*lhsX[1][2][CC][i-1][j][k] - lhsX[0][2][AA][i][j][k]*lhsX[2][2][CC][i-1][j][k] - lhsX[0][3][AA][i][j][k]*lhsX[3][2][CC][i-1][j][k] - lhsX[0][4][AA][i][j][k]*lhsX[4][2][CC][i-1][j][k]; lhsX[1][2][BB][i][j][k] = lhsX[1][2][BB][i][j][k] - lhsX[1][0][AA][i][j][k]*lhsX[0][2][CC][i-1][j][k] - lhsX[1][1][AA][i][j][k]*lhsX[1][2][CC][i-1][j][k] - lhsX[1][2][AA][i][j][k]*lhsX[2][2][CC][i-1][j][k] - lhsX[1][3][AA][i][j][k]*lhsX[3][2][CC][i-1][j][k] - lhsX[1][4][AA][i][j][k]*lhsX[4][2][CC][i-1][j][k]; lhsX[2][2][BB][i][j][k] = lhsX[2][2][BB][i][j][k] - lhsX[2][0][AA][i][j][k]*lhsX[0][2][CC][i-1][j][k] - lhsX[2][1][AA][i][j][k]*lhsX[1][2][CC][i-1][j][k] - lhsX[2][2][AA][i][j][k]*lhsX[2][2][CC][i-1][j][k] - lhsX[2][3][AA][i][j][k]*lhsX[3][2][CC][i-1][j][k] - lhsX[2][4][AA][i][j][k]*lhsX[4][2][CC][i-1][j][k]; lhsX[3][2][BB][i][j][k] = lhsX[3][2][BB][i][j][k] - lhsX[3][0][AA][i][j][k]*lhsX[0][2][CC][i-1][j][k] - lhsX[3][1][AA][i][j][k]*lhsX[1][2][CC][i-1][j][k] - lhsX[3][2][AA][i][j][k]*lhsX[2][2][CC][i-1][j][k] - lhsX[3][3][AA][i][j][k]*lhsX[3][2][CC][i-1][j][k] - lhsX[3][4][AA][i][j][k]*lhsX[4][2][CC][i-1][j][k]; lhsX[4][2][BB][i][j][k] = 
lhsX[4][2][BB][i][j][k] - lhsX[4][0][AA][i][j][k]*lhsX[0][2][CC][i-1][j][k] - lhsX[4][1][AA][i][j][k]*lhsX[1][2][CC][i-1][j][k] - lhsX[4][2][AA][i][j][k]*lhsX[2][2][CC][i-1][j][k] - lhsX[4][3][AA][i][j][k]*lhsX[3][2][CC][i-1][j][k] - lhsX[4][4][AA][i][j][k]*lhsX[4][2][CC][i-1][j][k]; lhsX[0][3][BB][i][j][k] = lhsX[0][3][BB][i][j][k] - lhsX[0][0][AA][i][j][k]*lhsX[0][3][CC][i-1][j][k] - lhsX[0][1][AA][i][j][k]*lhsX[1][3][CC][i-1][j][k] - lhsX[0][2][AA][i][j][k]*lhsX[2][3][CC][i-1][j][k] - lhsX[0][3][AA][i][j][k]*lhsX[3][3][CC][i-1][j][k] - lhsX[0][4][AA][i][j][k]*lhsX[4][3][CC][i-1][j][k]; lhsX[1][3][BB][i][j][k] = lhsX[1][3][BB][i][j][k] - lhsX[1][0][AA][i][j][k]*lhsX[0][3][CC][i-1][j][k] - lhsX[1][1][AA][i][j][k]*lhsX[1][3][CC][i-1][j][k] - lhsX[1][2][AA][i][j][k]*lhsX[2][3][CC][i-1][j][k] - lhsX[1][3][AA][i][j][k]*lhsX[3][3][CC][i-1][j][k] - lhsX[1][4][AA][i][j][k]*lhsX[4][3][CC][i-1][j][k]; lhsX[2][3][BB][i][j][k] = lhsX[2][3][BB][i][j][k] - lhsX[2][0][AA][i][j][k]*lhsX[0][3][CC][i-1][j][k] - lhsX[2][1][AA][i][j][k]*lhsX[1][3][CC][i-1][j][k] - lhsX[2][2][AA][i][j][k]*lhsX[2][3][CC][i-1][j][k] - lhsX[2][3][AA][i][j][k]*lhsX[3][3][CC][i-1][j][k] - lhsX[2][4][AA][i][j][k]*lhsX[4][3][CC][i-1][j][k]; lhsX[3][3][BB][i][j][k] = lhsX[3][3][BB][i][j][k] - lhsX[3][0][AA][i][j][k]*lhsX[0][3][CC][i-1][j][k] - lhsX[3][1][AA][i][j][k]*lhsX[1][3][CC][i-1][j][k] - lhsX[3][2][AA][i][j][k]*lhsX[2][3][CC][i-1][j][k] - lhsX[3][3][AA][i][j][k]*lhsX[3][3][CC][i-1][j][k] - lhsX[3][4][AA][i][j][k]*lhsX[4][3][CC][i-1][j][k]; lhsX[4][3][BB][i][j][k] = lhsX[4][3][BB][i][j][k] - lhsX[4][0][AA][i][j][k]*lhsX[0][3][CC][i-1][j][k] - lhsX[4][1][AA][i][j][k]*lhsX[1][3][CC][i-1][j][k] - lhsX[4][2][AA][i][j][k]*lhsX[2][3][CC][i-1][j][k] - lhsX[4][3][AA][i][j][k]*lhsX[3][3][CC][i-1][j][k] - lhsX[4][4][AA][i][j][k]*lhsX[4][3][CC][i-1][j][k]; lhsX[0][4][BB][i][j][k] = lhsX[0][4][BB][i][j][k] - lhsX[0][0][AA][i][j][k]*lhsX[0][4][CC][i-1][j][k] - lhsX[0][1][AA][i][j][k]*lhsX[1][4][CC][i-1][j][k] - 
lhsX[0][2][AA][i][j][k]*lhsX[2][4][CC][i-1][j][k] - lhsX[0][3][AA][i][j][k]*lhsX[3][4][CC][i-1][j][k] - lhsX[0][4][AA][i][j][k]*lhsX[4][4][CC][i-1][j][k]; lhsX[1][4][BB][i][j][k] = lhsX[1][4][BB][i][j][k] - lhsX[1][0][AA][i][j][k]*lhsX[0][4][CC][i-1][j][k] - lhsX[1][1][AA][i][j][k]*lhsX[1][4][CC][i-1][j][k] - lhsX[1][2][AA][i][j][k]*lhsX[2][4][CC][i-1][j][k] - lhsX[1][3][AA][i][j][k]*lhsX[3][4][CC][i-1][j][k] - lhsX[1][4][AA][i][j][k]*lhsX[4][4][CC][i-1][j][k]; lhsX[2][4][BB][i][j][k] = lhsX[2][4][BB][i][j][k] - lhsX[2][0][AA][i][j][k]*lhsX[0][4][CC][i-1][j][k] - lhsX[2][1][AA][i][j][k]*lhsX[1][4][CC][i-1][j][k] - lhsX[2][2][AA][i][j][k]*lhsX[2][4][CC][i-1][j][k] - lhsX[2][3][AA][i][j][k]*lhsX[3][4][CC][i-1][j][k] - lhsX[2][4][AA][i][j][k]*lhsX[4][4][CC][i-1][j][k]; lhsX[3][4][BB][i][j][k] = lhsX[3][4][BB][i][j][k] - lhsX[3][0][AA][i][j][k]*lhsX[0][4][CC][i-1][j][k] - lhsX[3][1][AA][i][j][k]*lhsX[1][4][CC][i-1][j][k] - lhsX[3][2][AA][i][j][k]*lhsX[2][4][CC][i-1][j][k] - lhsX[3][3][AA][i][j][k]*lhsX[3][4][CC][i-1][j][k] - lhsX[3][4][AA][i][j][k]*lhsX[4][4][CC][i-1][j][k]; lhsX[4][4][BB][i][j][k] = lhsX[4][4][BB][i][j][k] - lhsX[4][0][AA][i][j][k]*lhsX[0][4][CC][i-1][j][k] - lhsX[4][1][AA][i][j][k]*lhsX[1][4][CC][i-1][j][k] - lhsX[4][2][AA][i][j][k]*lhsX[2][4][CC][i-1][j][k] - lhsX[4][3][AA][i][j][k]*lhsX[3][4][CC][i-1][j][k] - lhsX[4][4][AA][i][j][k]*lhsX[4][4][CC][i-1][j][k]; //------------------------------------------------------------------- // multiply c[k][j][i] by b_inverse and copy back to c // multiply rhs[k][j][0] by b_inverse[k][j][0] and copy to rhs //------------------------------------------------------------------- //binvcrhs( lhsX[i][j][BB], lhsX[k][i][j][k][CC], rhs[k][j][i] ); /* for(m = 0; m < 5; m++){ pivot = 1.00/lhsX[m][m][BB][i][j][k]; for(n = m+1; n < 5; n++){ lhsX[m][n][BB][i][j][k] = lhsX[m][n][BB][i][j][k]*pivot; } lhsX[m][0][CC][i][j][k] = lhsX[m][0][CC][i][j][k]*pivot; lhsX[m][1][CC][i][j][k] = lhsX[m][1][CC][i][j][k]*pivot; 
lhsX[m][2][CC][i][j][k] = lhsX[m][2][CC][i][j][k]*pivot; lhsX[m][3][CC][i][j][k] = lhsX[m][3][CC][i][j][k]*pivot; lhsX[m][4][CC][i][j][k] = lhsX[m][4][CC][i][j][k]*pivot; rhs[k][j][i][m] = rhs[k][j][i][m]*pivot; for(n = 0; n < 5; n++){ if(n != m){ coeff = lhsX[n][m][BB][i][j][k]; for(z = m+1; z < 5; z++){ lhsX[n][z][BB][i][j][k] = lhsX[n][z][BB][i][j][k] - coeff*lhsX[m][z][BB][i][j][k]; } lhsX[n][0][CC][i][j][k] = lhsX[n][0][CC][i][j][k] - coeff*lhsX[m][0][CC][i][j][k]; lhsX[n][1][CC][i][j][k] = lhsX[n][1][CC][i][j][k] - coeff*lhsX[m][1][CC][i][j][k]; lhsX[n][2][CC][i][j][k] = lhsX[n][2][CC][i][j][k] - coeff*lhsX[m][2][CC][i][j][k]; lhsX[n][3][CC][i][j][k] = lhsX[n][3][CC][i][j][k] - coeff*lhsX[m][3][CC][i][j][k]; lhsX[n][4][CC][i][j][k] = lhsX[n][4][CC][i][j][k] - coeff*lhsX[m][4][CC][i][j][k]; rhs[k][j][i][n] = rhs[k][j][i][n] - coeff*rhs[k][j][i][m]; } } } */ pivot = 1.00/lhsX[0][0][BB][i][j][k]; lhsX[0][1][BB][i][j][k] = lhsX[0][1][BB][i][j][k]*pivot; lhsX[0][2][BB][i][j][k] = lhsX[0][2][BB][i][j][k]*pivot; lhsX[0][3][BB][i][j][k] = lhsX[0][3][BB][i][j][k]*pivot; lhsX[0][4][BB][i][j][k] = lhsX[0][4][BB][i][j][k]*pivot; lhsX[0][0][CC][i][j][k] = lhsX[0][0][CC][i][j][k]*pivot; lhsX[0][1][CC][i][j][k] = lhsX[0][1][CC][i][j][k]*pivot; lhsX[0][2][CC][i][j][k] = lhsX[0][2][CC][i][j][k]*pivot; lhsX[0][3][CC][i][j][k] = lhsX[0][3][CC][i][j][k]*pivot; lhsX[0][4][CC][i][j][k] = lhsX[0][4][CC][i][j][k]*pivot; rhs[k][j][i][0] = rhs[k][j][i][0] *pivot; coeff = lhsX[1][0][BB][i][j][k]; lhsX[1][1][BB][i][j][k]= lhsX[1][1][BB][i][j][k] - coeff*lhsX[0][1][BB][i][j][k]; lhsX[1][2][BB][i][j][k]= lhsX[1][2][BB][i][j][k] - coeff*lhsX[0][2][BB][i][j][k]; lhsX[1][3][BB][i][j][k]= lhsX[1][3][BB][i][j][k] - coeff*lhsX[0][3][BB][i][j][k]; lhsX[1][4][BB][i][j][k]= lhsX[1][4][BB][i][j][k] - coeff*lhsX[0][4][BB][i][j][k]; lhsX[1][0][CC][i][j][k] = lhsX[1][0][CC][i][j][k] - coeff*lhsX[0][0][CC][i][j][k]; lhsX[1][1][CC][i][j][k] = lhsX[1][1][CC][i][j][k] - coeff*lhsX[0][1][CC][i][j][k]; 
lhsX[1][2][CC][i][j][k] = lhsX[1][2][CC][i][j][k] - coeff*lhsX[0][2][CC][i][j][k]; lhsX[1][3][CC][i][j][k] = lhsX[1][3][CC][i][j][k] - coeff*lhsX[0][3][CC][i][j][k]; lhsX[1][4][CC][i][j][k] = lhsX[1][4][CC][i][j][k] - coeff*lhsX[0][4][CC][i][j][k]; rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][0]; coeff = lhsX[2][0][BB][i][j][k]; lhsX[2][1][BB][i][j][k]= lhsX[2][1][BB][i][j][k] - coeff*lhsX[0][1][BB][i][j][k]; lhsX[2][2][BB][i][j][k]= lhsX[2][2][BB][i][j][k] - coeff*lhsX[0][2][BB][i][j][k]; lhsX[2][3][BB][i][j][k]= lhsX[2][3][BB][i][j][k] - coeff*lhsX[0][3][BB][i][j][k]; lhsX[2][4][BB][i][j][k]= lhsX[2][4][BB][i][j][k] - coeff*lhsX[0][4][BB][i][j][k]; lhsX[2][0][CC][i][j][k] = lhsX[2][0][CC][i][j][k] - coeff*lhsX[0][0][CC][i][j][k]; lhsX[2][1][CC][i][j][k] = lhsX[2][1][CC][i][j][k] - coeff*lhsX[0][1][CC][i][j][k]; lhsX[2][2][CC][i][j][k] = lhsX[2][2][CC][i][j][k] - coeff*lhsX[0][2][CC][i][j][k]; lhsX[2][3][CC][i][j][k] = lhsX[2][3][CC][i][j][k] - coeff*lhsX[0][3][CC][i][j][k]; lhsX[2][4][CC][i][j][k] = lhsX[2][4][CC][i][j][k] - coeff*lhsX[0][4][CC][i][j][k]; rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][0]; coeff = lhsX[3][0][BB][i][j][k]; lhsX[3][1][BB][i][j][k]= lhsX[3][1][BB][i][j][k] - coeff*lhsX[0][1][BB][i][j][k]; lhsX[3][2][BB][i][j][k]= lhsX[3][2][BB][i][j][k] - coeff*lhsX[0][2][BB][i][j][k]; lhsX[3][3][BB][i][j][k]= lhsX[3][3][BB][i][j][k] - coeff*lhsX[0][3][BB][i][j][k]; lhsX[3][4][BB][i][j][k]= lhsX[3][4][BB][i][j][k] - coeff*lhsX[0][4][BB][i][j][k]; lhsX[3][0][CC][i][j][k] = lhsX[3][0][CC][i][j][k] - coeff*lhsX[0][0][CC][i][j][k]; lhsX[3][1][CC][i][j][k] = lhsX[3][1][CC][i][j][k] - coeff*lhsX[0][1][CC][i][j][k]; lhsX[3][2][CC][i][j][k] = lhsX[3][2][CC][i][j][k] - coeff*lhsX[0][2][CC][i][j][k]; lhsX[3][3][CC][i][j][k] = lhsX[3][3][CC][i][j][k] - coeff*lhsX[0][3][CC][i][j][k]; lhsX[3][4][CC][i][j][k] = lhsX[3][4][CC][i][j][k] - coeff*lhsX[0][4][CC][i][j][k]; rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][0]; coeff = 
lhsX[4][0][BB][i][j][k]; lhsX[4][1][BB][i][j][k]= lhsX[4][1][BB][i][j][k] - coeff*lhsX[0][1][BB][i][j][k]; lhsX[4][2][BB][i][j][k]= lhsX[4][2][BB][i][j][k] - coeff*lhsX[0][2][BB][i][j][k]; lhsX[4][3][BB][i][j][k]= lhsX[4][3][BB][i][j][k] - coeff*lhsX[0][3][BB][i][j][k]; lhsX[4][4][BB][i][j][k]= lhsX[4][4][BB][i][j][k] - coeff*lhsX[0][4][BB][i][j][k]; lhsX[4][0][CC][i][j][k] = lhsX[4][0][CC][i][j][k] - coeff*lhsX[0][0][CC][i][j][k]; lhsX[4][1][CC][i][j][k] = lhsX[4][1][CC][i][j][k] - coeff*lhsX[0][1][CC][i][j][k]; lhsX[4][2][CC][i][j][k] = lhsX[4][2][CC][i][j][k] - coeff*lhsX[0][2][CC][i][j][k]; lhsX[4][3][CC][i][j][k] = lhsX[4][3][CC][i][j][k] - coeff*lhsX[0][3][CC][i][j][k]; lhsX[4][4][CC][i][j][k] = lhsX[4][4][CC][i][j][k] - coeff*lhsX[0][4][CC][i][j][k]; rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][0]; pivot = 1.00/lhsX[1][1][BB][i][j][k]; lhsX[1][2][BB][i][j][k] = lhsX[1][2][BB][i][j][k]*pivot; lhsX[1][3][BB][i][j][k] = lhsX[1][3][BB][i][j][k]*pivot; lhsX[1][4][BB][i][j][k] = lhsX[1][4][BB][i][j][k]*pivot; lhsX[1][0][CC][i][j][k] = lhsX[1][0][CC][i][j][k]*pivot; lhsX[1][1][CC][i][j][k] = lhsX[1][1][CC][i][j][k]*pivot; lhsX[1][2][CC][i][j][k] = lhsX[1][2][CC][i][j][k]*pivot; lhsX[1][3][CC][i][j][k] = lhsX[1][3][CC][i][j][k]*pivot; lhsX[1][4][CC][i][j][k] = lhsX[1][4][CC][i][j][k]*pivot; rhs[k][j][i][1] = rhs[k][j][i][1] *pivot; coeff = lhsX[0][1][BB][i][j][k]; lhsX[0][2][BB][i][j][k]= lhsX[0][2][BB][i][j][k] - coeff*lhsX[1][2][BB][i][j][k]; lhsX[0][3][BB][i][j][k]= lhsX[0][3][BB][i][j][k] - coeff*lhsX[1][3][BB][i][j][k]; lhsX[0][4][BB][i][j][k]= lhsX[0][4][BB][i][j][k] - coeff*lhsX[1][4][BB][i][j][k]; lhsX[0][0][CC][i][j][k] = lhsX[0][0][CC][i][j][k] - coeff*lhsX[1][0][CC][i][j][k]; lhsX[0][1][CC][i][j][k] = lhsX[0][1][CC][i][j][k] - coeff*lhsX[1][1][CC][i][j][k]; lhsX[0][2][CC][i][j][k] = lhsX[0][2][CC][i][j][k] - coeff*lhsX[1][2][CC][i][j][k]; lhsX[0][3][CC][i][j][k] = lhsX[0][3][CC][i][j][k] - coeff*lhsX[1][3][CC][i][j][k]; lhsX[0][4][CC][i][j][k] = 
lhsX[0][4][CC][i][j][k] - coeff*lhsX[1][4][CC][i][j][k]; rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][1]; coeff = lhsX[2][1][BB][i][j][k]; lhsX[2][2][BB][i][j][k]= lhsX[2][2][BB][i][j][k] - coeff*lhsX[1][2][BB][i][j][k]; lhsX[2][3][BB][i][j][k]= lhsX[2][3][BB][i][j][k] - coeff*lhsX[1][3][BB][i][j][k]; lhsX[2][4][BB][i][j][k]= lhsX[2][4][BB][i][j][k] - coeff*lhsX[1][4][BB][i][j][k]; lhsX[2][0][CC][i][j][k] = lhsX[2][0][CC][i][j][k] - coeff*lhsX[1][0][CC][i][j][k]; lhsX[2][1][CC][i][j][k] = lhsX[2][1][CC][i][j][k] - coeff*lhsX[1][1][CC][i][j][k]; lhsX[2][2][CC][i][j][k] = lhsX[2][2][CC][i][j][k] - coeff*lhsX[1][2][CC][i][j][k]; lhsX[2][3][CC][i][j][k] = lhsX[2][3][CC][i][j][k] - coeff*lhsX[1][3][CC][i][j][k]; lhsX[2][4][CC][i][j][k] = lhsX[2][4][CC][i][j][k] - coeff*lhsX[1][4][CC][i][j][k]; rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][1]; coeff = lhsX[3][1][BB][i][j][k]; lhsX[3][2][BB][i][j][k]= lhsX[3][2][BB][i][j][k] - coeff*lhsX[1][2][BB][i][j][k]; lhsX[3][3][BB][i][j][k]= lhsX[3][3][BB][i][j][k] - coeff*lhsX[1][3][BB][i][j][k]; lhsX[3][4][BB][i][j][k]= lhsX[3][4][BB][i][j][k] - coeff*lhsX[1][4][BB][i][j][k]; lhsX[3][0][CC][i][j][k] = lhsX[3][0][CC][i][j][k] - coeff*lhsX[1][0][CC][i][j][k]; lhsX[3][1][CC][i][j][k] = lhsX[3][1][CC][i][j][k] - coeff*lhsX[1][1][CC][i][j][k]; lhsX[3][2][CC][i][j][k] = lhsX[3][2][CC][i][j][k] - coeff*lhsX[1][2][CC][i][j][k]; lhsX[3][3][CC][i][j][k] = lhsX[3][3][CC][i][j][k] - coeff*lhsX[1][3][CC][i][j][k]; lhsX[3][4][CC][i][j][k] = lhsX[3][4][CC][i][j][k] - coeff*lhsX[1][4][CC][i][j][k]; rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][1]; coeff = lhsX[4][1][BB][i][j][k]; lhsX[4][2][BB][i][j][k]= lhsX[4][2][BB][i][j][k] - coeff*lhsX[1][2][BB][i][j][k]; lhsX[4][3][BB][i][j][k]= lhsX[4][3][BB][i][j][k] - coeff*lhsX[1][3][BB][i][j][k]; lhsX[4][4][BB][i][j][k]= lhsX[4][4][BB][i][j][k] - coeff*lhsX[1][4][BB][i][j][k]; lhsX[4][0][CC][i][j][k] = lhsX[4][0][CC][i][j][k] - coeff*lhsX[1][0][CC][i][j][k]; 
lhsX[4][1][CC][i][j][k] = lhsX[4][1][CC][i][j][k] - coeff*lhsX[1][1][CC][i][j][k]; lhsX[4][2][CC][i][j][k] = lhsX[4][2][CC][i][j][k] - coeff*lhsX[1][2][CC][i][j][k]; lhsX[4][3][CC][i][j][k] = lhsX[4][3][CC][i][j][k] - coeff*lhsX[1][3][CC][i][j][k]; lhsX[4][4][CC][i][j][k] = lhsX[4][4][CC][i][j][k] - coeff*lhsX[1][4][CC][i][j][k]; rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][1]; pivot = 1.00/lhsX[2][2][BB][i][j][k]; lhsX[2][3][BB][i][j][k] = lhsX[2][3][BB][i][j][k]*pivot; lhsX[2][4][BB][i][j][k] = lhsX[2][4][BB][i][j][k]*pivot; lhsX[2][0][CC][i][j][k] = lhsX[2][0][CC][i][j][k]*pivot; lhsX[2][1][CC][i][j][k] = lhsX[2][1][CC][i][j][k]*pivot; lhsX[2][2][CC][i][j][k] = lhsX[2][2][CC][i][j][k]*pivot; lhsX[2][3][CC][i][j][k] = lhsX[2][3][CC][i][j][k]*pivot; lhsX[2][4][CC][i][j][k] = lhsX[2][4][CC][i][j][k]*pivot; rhs[k][j][i][2] = rhs[k][j][i][2] *pivot; coeff = lhsX[0][2][BB][i][j][k]; lhsX[0][3][BB][i][j][k]= lhsX[0][3][BB][i][j][k] - coeff*lhsX[2][3][BB][i][j][k]; lhsX[0][4][BB][i][j][k]= lhsX[0][4][BB][i][j][k] - coeff*lhsX[2][4][BB][i][j][k]; lhsX[0][0][CC][i][j][k] = lhsX[0][0][CC][i][j][k] - coeff*lhsX[2][0][CC][i][j][k]; lhsX[0][1][CC][i][j][k] = lhsX[0][1][CC][i][j][k] - coeff*lhsX[2][1][CC][i][j][k]; lhsX[0][2][CC][i][j][k] = lhsX[0][2][CC][i][j][k] - coeff*lhsX[2][2][CC][i][j][k]; lhsX[0][3][CC][i][j][k] = lhsX[0][3][CC][i][j][k] - coeff*lhsX[2][3][CC][i][j][k]; lhsX[0][4][CC][i][j][k] = lhsX[0][4][CC][i][j][k] - coeff*lhsX[2][4][CC][i][j][k]; rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][2]; coeff = lhsX[1][2][BB][i][j][k]; lhsX[1][3][BB][i][j][k]= lhsX[1][3][BB][i][j][k] - coeff*lhsX[2][3][BB][i][j][k]; lhsX[1][4][BB][i][j][k]= lhsX[1][4][BB][i][j][k] - coeff*lhsX[2][4][BB][i][j][k]; lhsX[1][0][CC][i][j][k] = lhsX[1][0][CC][i][j][k] - coeff*lhsX[2][0][CC][i][j][k]; lhsX[1][1][CC][i][j][k] = lhsX[1][1][CC][i][j][k] - coeff*lhsX[2][1][CC][i][j][k]; lhsX[1][2][CC][i][j][k] = lhsX[1][2][CC][i][j][k] - coeff*lhsX[2][2][CC][i][j][k]; 
lhsX[1][3][CC][i][j][k] = lhsX[1][3][CC][i][j][k] - coeff*lhsX[2][3][CC][i][j][k]; lhsX[1][4][CC][i][j][k] = lhsX[1][4][CC][i][j][k] - coeff*lhsX[2][4][CC][i][j][k]; rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][2]; coeff = lhsX[3][2][BB][i][j][k]; lhsX[3][3][BB][i][j][k]= lhsX[3][3][BB][i][j][k] - coeff*lhsX[2][3][BB][i][j][k]; lhsX[3][4][BB][i][j][k]= lhsX[3][4][BB][i][j][k] - coeff*lhsX[2][4][BB][i][j][k]; lhsX[3][0][CC][i][j][k] = lhsX[3][0][CC][i][j][k] - coeff*lhsX[2][0][CC][i][j][k]; lhsX[3][1][CC][i][j][k] = lhsX[3][1][CC][i][j][k] - coeff*lhsX[2][1][CC][i][j][k]; lhsX[3][2][CC][i][j][k] = lhsX[3][2][CC][i][j][k] - coeff*lhsX[2][2][CC][i][j][k]; lhsX[3][3][CC][i][j][k] = lhsX[3][3][CC][i][j][k] - coeff*lhsX[2][3][CC][i][j][k]; lhsX[3][4][CC][i][j][k] = lhsX[3][4][CC][i][j][k] - coeff*lhsX[2][4][CC][i][j][k]; rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][2]; coeff = lhsX[4][2][BB][i][j][k]; lhsX[4][3][BB][i][j][k]= lhsX[4][3][BB][i][j][k] - coeff*lhsX[2][3][BB][i][j][k]; lhsX[4][4][BB][i][j][k]= lhsX[4][4][BB][i][j][k] - coeff*lhsX[2][4][BB][i][j][k]; lhsX[4][0][CC][i][j][k] = lhsX[4][0][CC][i][j][k] - coeff*lhsX[2][0][CC][i][j][k]; lhsX[4][1][CC][i][j][k] = lhsX[4][1][CC][i][j][k] - coeff*lhsX[2][1][CC][i][j][k]; lhsX[4][2][CC][i][j][k] = lhsX[4][2][CC][i][j][k] - coeff*lhsX[2][2][CC][i][j][k]; lhsX[4][3][CC][i][j][k] = lhsX[4][3][CC][i][j][k] - coeff*lhsX[2][3][CC][i][j][k]; lhsX[4][4][CC][i][j][k] = lhsX[4][4][CC][i][j][k] - coeff*lhsX[2][4][CC][i][j][k]; rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][2]; pivot = 1.00/lhsX[3][3][BB][i][j][k]; lhsX[3][4][BB][i][j][k] = lhsX[3][4][BB][i][j][k]*pivot; lhsX[3][0][CC][i][j][k] = lhsX[3][0][CC][i][j][k]*pivot; lhsX[3][1][CC][i][j][k] = lhsX[3][1][CC][i][j][k]*pivot; lhsX[3][2][CC][i][j][k] = lhsX[3][2][CC][i][j][k]*pivot; lhsX[3][3][CC][i][j][k] = lhsX[3][3][CC][i][j][k]*pivot; lhsX[3][4][CC][i][j][k] = lhsX[3][4][CC][i][j][k]*pivot; rhs[k][j][i][3] = rhs[k][j][i][3] *pivot; coeff = 
lhsX[0][3][BB][i][j][k]; lhsX[0][4][BB][i][j][k]= lhsX[0][4][BB][i][j][k] - coeff*lhsX[3][4][BB][i][j][k]; lhsX[0][0][CC][i][j][k] = lhsX[0][0][CC][i][j][k] - coeff*lhsX[3][0][CC][i][j][k]; lhsX[0][1][CC][i][j][k] = lhsX[0][1][CC][i][j][k] - coeff*lhsX[3][1][CC][i][j][k]; lhsX[0][2][CC][i][j][k] = lhsX[0][2][CC][i][j][k] - coeff*lhsX[3][2][CC][i][j][k]; lhsX[0][3][CC][i][j][k] = lhsX[0][3][CC][i][j][k] - coeff*lhsX[3][3][CC][i][j][k]; lhsX[0][4][CC][i][j][k] = lhsX[0][4][CC][i][j][k] - coeff*lhsX[3][4][CC][i][j][k]; rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][3]; coeff = lhsX[1][3][BB][i][j][k]; lhsX[1][4][BB][i][j][k]= lhsX[1][4][BB][i][j][k] - coeff*lhsX[3][4][BB][i][j][k]; lhsX[1][0][CC][i][j][k] = lhsX[1][0][CC][i][j][k] - coeff*lhsX[3][0][CC][i][j][k]; lhsX[1][1][CC][i][j][k] = lhsX[1][1][CC][i][j][k] - coeff*lhsX[3][1][CC][i][j][k]; lhsX[1][2][CC][i][j][k] = lhsX[1][2][CC][i][j][k] - coeff*lhsX[3][2][CC][i][j][k]; lhsX[1][3][CC][i][j][k] = lhsX[1][3][CC][i][j][k] - coeff*lhsX[3][3][CC][i][j][k]; lhsX[1][4][CC][i][j][k] = lhsX[1][4][CC][i][j][k] - coeff*lhsX[3][4][CC][i][j][k]; rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][3]; coeff = lhsX[2][3][BB][i][j][k]; lhsX[2][4][BB][i][j][k]= lhsX[2][4][BB][i][j][k] - coeff*lhsX[3][4][BB][i][j][k]; lhsX[2][0][CC][i][j][k] = lhsX[2][0][CC][i][j][k] - coeff*lhsX[3][0][CC][i][j][k]; lhsX[2][1][CC][i][j][k] = lhsX[2][1][CC][i][j][k] - coeff*lhsX[3][1][CC][i][j][k]; lhsX[2][2][CC][i][j][k] = lhsX[2][2][CC][i][j][k] - coeff*lhsX[3][2][CC][i][j][k]; lhsX[2][3][CC][i][j][k] = lhsX[2][3][CC][i][j][k] - coeff*lhsX[3][3][CC][i][j][k]; lhsX[2][4][CC][i][j][k] = lhsX[2][4][CC][i][j][k] - coeff*lhsX[3][4][CC][i][j][k]; rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][3]; coeff = lhsX[4][3][BB][i][j][k]; lhsX[4][4][BB][i][j][k]= lhsX[4][4][BB][i][j][k] - coeff*lhsX[3][4][BB][i][j][k]; lhsX[4][0][CC][i][j][k] = lhsX[4][0][CC][i][j][k] - coeff*lhsX[3][0][CC][i][j][k]; lhsX[4][1][CC][i][j][k] = 
lhsX[4][1][CC][i][j][k] - coeff*lhsX[3][1][CC][i][j][k]; lhsX[4][2][CC][i][j][k] = lhsX[4][2][CC][i][j][k] - coeff*lhsX[3][2][CC][i][j][k]; lhsX[4][3][CC][i][j][k] = lhsX[4][3][CC][i][j][k] - coeff*lhsX[3][3][CC][i][j][k]; lhsX[4][4][CC][i][j][k] = lhsX[4][4][CC][i][j][k] - coeff*lhsX[3][4][CC][i][j][k]; rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][3]; pivot = 1.00/lhsX[4][4][BB][i][j][k]; lhsX[4][0][CC][i][j][k] = lhsX[4][0][CC][i][j][k]*pivot; lhsX[4][1][CC][i][j][k] = lhsX[4][1][CC][i][j][k]*pivot; lhsX[4][2][CC][i][j][k] = lhsX[4][2][CC][i][j][k]*pivot; lhsX[4][3][CC][i][j][k] = lhsX[4][3][CC][i][j][k]*pivot; lhsX[4][4][CC][i][j][k] = lhsX[4][4][CC][i][j][k]*pivot; rhs[k][j][i][4] = rhs[k][j][i][4] *pivot; coeff = lhsX[0][4][BB][i][j][k]; lhsX[0][0][CC][i][j][k] = lhsX[0][0][CC][i][j][k] - coeff*lhsX[4][0][CC][i][j][k]; lhsX[0][1][CC][i][j][k] = lhsX[0][1][CC][i][j][k] - coeff*lhsX[4][1][CC][i][j][k]; lhsX[0][2][CC][i][j][k] = lhsX[0][2][CC][i][j][k] - coeff*lhsX[4][2][CC][i][j][k]; lhsX[0][3][CC][i][j][k] = lhsX[0][3][CC][i][j][k] - coeff*lhsX[4][3][CC][i][j][k]; lhsX[0][4][CC][i][j][k] = lhsX[0][4][CC][i][j][k] - coeff*lhsX[4][4][CC][i][j][k]; rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][4]; coeff = lhsX[1][4][BB][i][j][k]; lhsX[1][0][CC][i][j][k] = lhsX[1][0][CC][i][j][k] - coeff*lhsX[4][0][CC][i][j][k]; lhsX[1][1][CC][i][j][k] = lhsX[1][1][CC][i][j][k] - coeff*lhsX[4][1][CC][i][j][k]; lhsX[1][2][CC][i][j][k] = lhsX[1][2][CC][i][j][k] - coeff*lhsX[4][2][CC][i][j][k]; lhsX[1][3][CC][i][j][k] = lhsX[1][3][CC][i][j][k] - coeff*lhsX[4][3][CC][i][j][k]; lhsX[1][4][CC][i][j][k] = lhsX[1][4][CC][i][j][k] - coeff*lhsX[4][4][CC][i][j][k]; rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][4]; coeff = lhsX[2][4][BB][i][j][k]; lhsX[2][0][CC][i][j][k] = lhsX[2][0][CC][i][j][k] - coeff*lhsX[4][0][CC][i][j][k]; lhsX[2][1][CC][i][j][k] = lhsX[2][1][CC][i][j][k] - coeff*lhsX[4][1][CC][i][j][k]; lhsX[2][2][CC][i][j][k] = lhsX[2][2][CC][i][j][k] - 
coeff*lhsX[4][2][CC][i][j][k]; lhsX[2][3][CC][i][j][k] = lhsX[2][3][CC][i][j][k] - coeff*lhsX[4][3][CC][i][j][k]; lhsX[2][4][CC][i][j][k] = lhsX[2][4][CC][i][j][k] - coeff*lhsX[4][4][CC][i][j][k]; rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][4]; coeff = lhsX[3][4][BB][i][j][k]; lhsX[3][0][CC][i][j][k] = lhsX[3][0][CC][i][j][k] - coeff*lhsX[4][0][CC][i][j][k]; lhsX[3][1][CC][i][j][k] = lhsX[3][1][CC][i][j][k] - coeff*lhsX[4][1][CC][i][j][k]; lhsX[3][2][CC][i][j][k] = lhsX[3][2][CC][i][j][k] - coeff*lhsX[4][2][CC][i][j][k]; lhsX[3][3][CC][i][j][k] = lhsX[3][3][CC][i][j][k] - coeff*lhsX[4][3][CC][i][j][k]; lhsX[3][4][CC][i][j][k] = lhsX[3][4][CC][i][j][k] - coeff*lhsX[4][4][CC][i][j][k]; rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][4]; }/*end i*/ } } //--------------------------------------------------------------------- // rhs(isize) = rhs(isize) - A*rhs(isize-1) //--------------------------------------------------------------------- //matvec_sub(lhsX[isize-1][j][AA], rhs[k][isize][j][k], rhs[k][j][isize]); #pragma omp target teams distribute parallel for collapse(2) private(k,j) for (k = 1; k <= gp22; k++) { for (j = 1; j <= gp12; j++) { /* for(m = 0; m < 5; m++){ rhs[k][j][isize][m] = rhs[k][j][isize][m] - lhsX[m][0][AA][isize][j][k]*rhs[k][j][isize-1][0] - lhsX[m][1][AA][isize][j][k]*rhs[k][j][isize-1][1] - lhsX[m][2][AA][isize][j][k]*rhs[k][j][isize-1][2] - lhsX[m][3][AA][isize][j][k]*rhs[k][j][isize-1][3] - lhsX[m][4][AA][isize][j][k]*rhs[k][j][isize-1][4]; } */ rhs[k][j][isize][0] = rhs[k][j][isize][0] - lhsX[0][0][AA][isize][j][k]*rhs[k][j][isize-1][0] - lhsX[0][1][AA][isize][j][k]*rhs[k][j][isize-1][1] - lhsX[0][2][AA][isize][j][k]*rhs[k][j][isize-1][2] - lhsX[0][3][AA][isize][j][k]*rhs[k][j][isize-1][3] - lhsX[0][4][AA][isize][j][k]*rhs[k][j][isize-1][4]; rhs[k][j][isize][1] = rhs[k][j][isize][1] - lhsX[1][0][AA][isize][j][k]*rhs[k][j][isize-1][0] - lhsX[1][1][AA][isize][j][k]*rhs[k][j][isize-1][1] - 
lhsX[1][2][AA][isize][j][k]*rhs[k][j][isize-1][2] - lhsX[1][3][AA][isize][j][k]*rhs[k][j][isize-1][3] - lhsX[1][4][AA][isize][j][k]*rhs[k][j][isize-1][4]; rhs[k][j][isize][2] = rhs[k][j][isize][2] - lhsX[2][0][AA][isize][j][k]*rhs[k][j][isize-1][0] - lhsX[2][1][AA][isize][j][k]*rhs[k][j][isize-1][1] - lhsX[2][2][AA][isize][j][k]*rhs[k][j][isize-1][2] - lhsX[2][3][AA][isize][j][k]*rhs[k][j][isize-1][3] - lhsX[2][4][AA][isize][j][k]*rhs[k][j][isize-1][4]; rhs[k][j][isize][3] = rhs[k][j][isize][3] - lhsX[3][0][AA][isize][j][k]*rhs[k][j][isize-1][0] - lhsX[3][1][AA][isize][j][k]*rhs[k][j][isize-1][1] - lhsX[3][2][AA][isize][j][k]*rhs[k][j][isize-1][2] - lhsX[3][3][AA][isize][j][k]*rhs[k][j][isize-1][3] - lhsX[3][4][AA][isize][j][k]*rhs[k][j][isize-1][4]; rhs[k][j][isize][4] = rhs[k][j][isize][4] - lhsX[4][0][AA][isize][j][k]*rhs[k][j][isize-1][0] - lhsX[4][1][AA][isize][j][k]*rhs[k][j][isize-1][1] - lhsX[4][2][AA][isize][j][k]*rhs[k][j][isize-1][2] - lhsX[4][3][AA][isize][j][k]*rhs[k][j][isize-1][3] - lhsX[4][4][AA][isize][j][k]*rhs[k][j][isize-1][4]; } } //--------------------------------------------------------------------- // B(isize) = B(isize) - C(isize-1)*A(isize) //--------------------------------------------------------------------- //matmul_sub(lhsX[isize-1][j][AA], lhsX[k][isize][j][k][CC], lhsX[k][j][isize][BB]); #pragma omp target teams distribute parallel for collapse(2) private(k,j) for (k = 1; k <= gp22; k++) { for (j = 1; j <= gp12; j++) { /* for(m = 0; m < 5; m++){ for(n = 0; n < 5; n++){ lhsX[n][m][BB][isize][j][k] = lhsX[n][m][BB][isize][j][k] - lhsX[n][0][AA][isize][j][k]*lhsX[0][m][CC][isize-1][j][k] - lhsX[n][1][AA][isize][j][k]*lhsX[1][m][CC][isize-1][j][k] - lhsX[n][2][AA][isize][j][k]*lhsX[2][m][CC][isize-1][j][k] - lhsX[n][3][AA][isize][j][k]*lhsX[3][m][CC][isize-1][j][k] - lhsX[n][4][AA][isize][j][k]*lhsX[4][m][CC][isize-1][j][k]; } } */ lhsX[0][0][BB][isize][j][k] = lhsX[0][0][BB][isize][j][k] - 
lhsX[0][0][AA][isize][j][k]*lhsX[0][0][CC][isize-1][j][k] - lhsX[0][1][AA][isize][j][k]*lhsX[1][0][CC][isize-1][j][k] - lhsX[0][2][AA][isize][j][k]*lhsX[2][0][CC][isize-1][j][k] - lhsX[0][3][AA][isize][j][k]*lhsX[3][0][CC][isize-1][j][k] - lhsX[0][4][AA][isize][j][k]*lhsX[4][0][CC][isize-1][j][k]; lhsX[1][0][BB][isize][j][k] = lhsX[1][0][BB][isize][j][k] - lhsX[1][0][AA][isize][j][k]*lhsX[0][0][CC][isize-1][j][k] - lhsX[1][1][AA][isize][j][k]*lhsX[1][0][CC][isize-1][j][k] - lhsX[1][2][AA][isize][j][k]*lhsX[2][0][CC][isize-1][j][k] - lhsX[1][3][AA][isize][j][k]*lhsX[3][0][CC][isize-1][j][k] - lhsX[1][4][AA][isize][j][k]*lhsX[4][0][CC][isize-1][j][k]; lhsX[2][0][BB][isize][j][k] = lhsX[2][0][BB][isize][j][k] - lhsX[2][0][AA][isize][j][k]*lhsX[0][0][CC][isize-1][j][k] - lhsX[2][1][AA][isize][j][k]*lhsX[1][0][CC][isize-1][j][k] - lhsX[2][2][AA][isize][j][k]*lhsX[2][0][CC][isize-1][j][k] - lhsX[2][3][AA][isize][j][k]*lhsX[3][0][CC][isize-1][j][k] - lhsX[2][4][AA][isize][j][k]*lhsX[4][0][CC][isize-1][j][k]; lhsX[3][0][BB][isize][j][k] = lhsX[3][0][BB][isize][j][k] - lhsX[3][0][AA][isize][j][k]*lhsX[0][0][CC][isize-1][j][k] - lhsX[3][1][AA][isize][j][k]*lhsX[1][0][CC][isize-1][j][k] - lhsX[3][2][AA][isize][j][k]*lhsX[2][0][CC][isize-1][j][k] - lhsX[3][3][AA][isize][j][k]*lhsX[3][0][CC][isize-1][j][k] - lhsX[3][4][AA][isize][j][k]*lhsX[4][0][CC][isize-1][j][k]; lhsX[4][0][BB][isize][j][k] = lhsX[4][0][BB][isize][j][k] - lhsX[4][0][AA][isize][j][k]*lhsX[0][0][CC][isize-1][j][k] - lhsX[4][1][AA][isize][j][k]*lhsX[1][0][CC][isize-1][j][k] - lhsX[4][2][AA][isize][j][k]*lhsX[2][0][CC][isize-1][j][k] - lhsX[4][3][AA][isize][j][k]*lhsX[3][0][CC][isize-1][j][k] - lhsX[4][4][AA][isize][j][k]*lhsX[4][0][CC][isize-1][j][k]; lhsX[0][1][BB][isize][j][k] = lhsX[0][1][BB][isize][j][k] - lhsX[0][0][AA][isize][j][k]*lhsX[0][1][CC][isize-1][j][k] - lhsX[0][1][AA][isize][j][k]*lhsX[1][1][CC][isize-1][j][k] - lhsX[0][2][AA][isize][j][k]*lhsX[2][1][CC][isize-1][j][k] - 
lhsX[0][3][AA][isize][j][k]*lhsX[3][1][CC][isize-1][j][k] - lhsX[0][4][AA][isize][j][k]*lhsX[4][1][CC][isize-1][j][k]; lhsX[1][1][BB][isize][j][k] = lhsX[1][1][BB][isize][j][k] - lhsX[1][0][AA][isize][j][k]*lhsX[0][1][CC][isize-1][j][k] - lhsX[1][1][AA][isize][j][k]*lhsX[1][1][CC][isize-1][j][k] - lhsX[1][2][AA][isize][j][k]*lhsX[2][1][CC][isize-1][j][k] - lhsX[1][3][AA][isize][j][k]*lhsX[3][1][CC][isize-1][j][k] - lhsX[1][4][AA][isize][j][k]*lhsX[4][1][CC][isize-1][j][k]; lhsX[2][1][BB][isize][j][k] = lhsX[2][1][BB][isize][j][k] - lhsX[2][0][AA][isize][j][k]*lhsX[0][1][CC][isize-1][j][k] - lhsX[2][1][AA][isize][j][k]*lhsX[1][1][CC][isize-1][j][k] - lhsX[2][2][AA][isize][j][k]*lhsX[2][1][CC][isize-1][j][k] - lhsX[2][3][AA][isize][j][k]*lhsX[3][1][CC][isize-1][j][k] - lhsX[2][4][AA][isize][j][k]*lhsX[4][1][CC][isize-1][j][k]; lhsX[3][1][BB][isize][j][k] = lhsX[3][1][BB][isize][j][k] - lhsX[3][0][AA][isize][j][k]*lhsX[0][1][CC][isize-1][j][k] - lhsX[3][1][AA][isize][j][k]*lhsX[1][1][CC][isize-1][j][k] - lhsX[3][2][AA][isize][j][k]*lhsX[2][1][CC][isize-1][j][k] - lhsX[3][3][AA][isize][j][k]*lhsX[3][1][CC][isize-1][j][k] - lhsX[3][4][AA][isize][j][k]*lhsX[4][1][CC][isize-1][j][k]; lhsX[4][1][BB][isize][j][k] = lhsX[4][1][BB][isize][j][k] - lhsX[4][0][AA][isize][j][k]*lhsX[0][1][CC][isize-1][j][k] - lhsX[4][1][AA][isize][j][k]*lhsX[1][1][CC][isize-1][j][k] - lhsX[4][2][AA][isize][j][k]*lhsX[2][1][CC][isize-1][j][k] - lhsX[4][3][AA][isize][j][k]*lhsX[3][1][CC][isize-1][j][k] - lhsX[4][4][AA][isize][j][k]*lhsX[4][1][CC][isize-1][j][k]; lhsX[0][2][BB][isize][j][k] = lhsX[0][2][BB][isize][j][k] - lhsX[0][0][AA][isize][j][k]*lhsX[0][2][CC][isize-1][j][k] - lhsX[0][1][AA][isize][j][k]*lhsX[1][2][CC][isize-1][j][k] - lhsX[0][2][AA][isize][j][k]*lhsX[2][2][CC][isize-1][j][k] - lhsX[0][3][AA][isize][j][k]*lhsX[3][2][CC][isize-1][j][k] - lhsX[0][4][AA][isize][j][k]*lhsX[4][2][CC][isize-1][j][k]; lhsX[1][2][BB][isize][j][k] = lhsX[1][2][BB][isize][j][k] - 
lhsX[1][0][AA][isize][j][k]*lhsX[0][2][CC][isize-1][j][k] - lhsX[1][1][AA][isize][j][k]*lhsX[1][2][CC][isize-1][j][k] - lhsX[1][2][AA][isize][j][k]*lhsX[2][2][CC][isize-1][j][k] - lhsX[1][3][AA][isize][j][k]*lhsX[3][2][CC][isize-1][j][k] - lhsX[1][4][AA][isize][j][k]*lhsX[4][2][CC][isize-1][j][k]; lhsX[2][2][BB][isize][j][k] = lhsX[2][2][BB][isize][j][k] - lhsX[2][0][AA][isize][j][k]*lhsX[0][2][CC][isize-1][j][k] - lhsX[2][1][AA][isize][j][k]*lhsX[1][2][CC][isize-1][j][k] - lhsX[2][2][AA][isize][j][k]*lhsX[2][2][CC][isize-1][j][k] - lhsX[2][3][AA][isize][j][k]*lhsX[3][2][CC][isize-1][j][k] - lhsX[2][4][AA][isize][j][k]*lhsX[4][2][CC][isize-1][j][k]; lhsX[3][2][BB][isize][j][k] = lhsX[3][2][BB][isize][j][k] - lhsX[3][0][AA][isize][j][k]*lhsX[0][2][CC][isize-1][j][k] - lhsX[3][1][AA][isize][j][k]*lhsX[1][2][CC][isize-1][j][k] - lhsX[3][2][AA][isize][j][k]*lhsX[2][2][CC][isize-1][j][k] - lhsX[3][3][AA][isize][j][k]*lhsX[3][2][CC][isize-1][j][k] - lhsX[3][4][AA][isize][j][k]*lhsX[4][2][CC][isize-1][j][k]; lhsX[4][2][BB][isize][j][k] = lhsX[4][2][BB][isize][j][k] - lhsX[4][0][AA][isize][j][k]*lhsX[0][2][CC][isize-1][j][k] - lhsX[4][1][AA][isize][j][k]*lhsX[1][2][CC][isize-1][j][k] - lhsX[4][2][AA][isize][j][k]*lhsX[2][2][CC][isize-1][j][k] - lhsX[4][3][AA][isize][j][k]*lhsX[3][2][CC][isize-1][j][k] - lhsX[4][4][AA][isize][j][k]*lhsX[4][2][CC][isize-1][j][k]; lhsX[0][3][BB][isize][j][k] = lhsX[0][3][BB][isize][j][k] - lhsX[0][0][AA][isize][j][k]*lhsX[0][3][CC][isize-1][j][k] - lhsX[0][1][AA][isize][j][k]*lhsX[1][3][CC][isize-1][j][k] - lhsX[0][2][AA][isize][j][k]*lhsX[2][3][CC][isize-1][j][k] - lhsX[0][3][AA][isize][j][k]*lhsX[3][3][CC][isize-1][j][k] - lhsX[0][4][AA][isize][j][k]*lhsX[4][3][CC][isize-1][j][k]; lhsX[1][3][BB][isize][j][k] = lhsX[1][3][BB][isize][j][k] - lhsX[1][0][AA][isize][j][k]*lhsX[0][3][CC][isize-1][j][k] - lhsX[1][1][AA][isize][j][k]*lhsX[1][3][CC][isize-1][j][k] - lhsX[1][2][AA][isize][j][k]*lhsX[2][3][CC][isize-1][j][k] - 
lhsX[1][3][AA][isize][j][k]*lhsX[3][3][CC][isize-1][j][k] - lhsX[1][4][AA][isize][j][k]*lhsX[4][3][CC][isize-1][j][k]; lhsX[2][3][BB][isize][j][k] = lhsX[2][3][BB][isize][j][k] - lhsX[2][0][AA][isize][j][k]*lhsX[0][3][CC][isize-1][j][k] - lhsX[2][1][AA][isize][j][k]*lhsX[1][3][CC][isize-1][j][k] - lhsX[2][2][AA][isize][j][k]*lhsX[2][3][CC][isize-1][j][k] - lhsX[2][3][AA][isize][j][k]*lhsX[3][3][CC][isize-1][j][k] - lhsX[2][4][AA][isize][j][k]*lhsX[4][3][CC][isize-1][j][k]; lhsX[3][3][BB][isize][j][k] = lhsX[3][3][BB][isize][j][k] - lhsX[3][0][AA][isize][j][k]*lhsX[0][3][CC][isize-1][j][k] - lhsX[3][1][AA][isize][j][k]*lhsX[1][3][CC][isize-1][j][k] - lhsX[3][2][AA][isize][j][k]*lhsX[2][3][CC][isize-1][j][k] - lhsX[3][3][AA][isize][j][k]*lhsX[3][3][CC][isize-1][j][k] - lhsX[3][4][AA][isize][j][k]*lhsX[4][3][CC][isize-1][j][k]; lhsX[4][3][BB][isize][j][k] = lhsX[4][3][BB][isize][j][k] - lhsX[4][0][AA][isize][j][k]*lhsX[0][3][CC][isize-1][j][k] - lhsX[4][1][AA][isize][j][k]*lhsX[1][3][CC][isize-1][j][k] - lhsX[4][2][AA][isize][j][k]*lhsX[2][3][CC][isize-1][j][k] - lhsX[4][3][AA][isize][j][k]*lhsX[3][3][CC][isize-1][j][k] - lhsX[4][4][AA][isize][j][k]*lhsX[4][3][CC][isize-1][j][k]; lhsX[0][4][BB][isize][j][k] = lhsX[0][4][BB][isize][j][k] - lhsX[0][0][AA][isize][j][k]*lhsX[0][4][CC][isize-1][j][k] - lhsX[0][1][AA][isize][j][k]*lhsX[1][4][CC][isize-1][j][k] - lhsX[0][2][AA][isize][j][k]*lhsX[2][4][CC][isize-1][j][k] - lhsX[0][3][AA][isize][j][k]*lhsX[3][4][CC][isize-1][j][k] - lhsX[0][4][AA][isize][j][k]*lhsX[4][4][CC][isize-1][j][k]; lhsX[1][4][BB][isize][j][k] = lhsX[1][4][BB][isize][j][k] - lhsX[1][0][AA][isize][j][k]*lhsX[0][4][CC][isize-1][j][k] - lhsX[1][1][AA][isize][j][k]*lhsX[1][4][CC][isize-1][j][k] - lhsX[1][2][AA][isize][j][k]*lhsX[2][4][CC][isize-1][j][k] - lhsX[1][3][AA][isize][j][k]*lhsX[3][4][CC][isize-1][j][k] - lhsX[1][4][AA][isize][j][k]*lhsX[4][4][CC][isize-1][j][k]; lhsX[2][4][BB][isize][j][k] = lhsX[2][4][BB][isize][j][k] - 
lhsX[2][0][AA][isize][j][k]*lhsX[0][4][CC][isize-1][j][k] - lhsX[2][1][AA][isize][j][k]*lhsX[1][4][CC][isize-1][j][k] - lhsX[2][2][AA][isize][j][k]*lhsX[2][4][CC][isize-1][j][k] - lhsX[2][3][AA][isize][j][k]*lhsX[3][4][CC][isize-1][j][k] - lhsX[2][4][AA][isize][j][k]*lhsX[4][4][CC][isize-1][j][k]; lhsX[3][4][BB][isize][j][k] = lhsX[3][4][BB][isize][j][k] - lhsX[3][0][AA][isize][j][k]*lhsX[0][4][CC][isize-1][j][k] - lhsX[3][1][AA][isize][j][k]*lhsX[1][4][CC][isize-1][j][k] - lhsX[3][2][AA][isize][j][k]*lhsX[2][4][CC][isize-1][j][k] - lhsX[3][3][AA][isize][j][k]*lhsX[3][4][CC][isize-1][j][k] - lhsX[3][4][AA][isize][j][k]*lhsX[4][4][CC][isize-1][j][k]; lhsX[4][4][BB][isize][j][k] = lhsX[4][4][BB][isize][j][k] - lhsX[4][0][AA][isize][j][k]*lhsX[0][4][CC][isize-1][j][k] - lhsX[4][1][AA][isize][j][k]*lhsX[1][4][CC][isize-1][j][k] - lhsX[4][2][AA][isize][j][k]*lhsX[2][4][CC][isize-1][j][k] - lhsX[4][3][AA][isize][j][k]*lhsX[3][4][CC][isize-1][j][k] - lhsX[4][4][AA][isize][j][k]*lhsX[4][4][CC][isize-1][j][k]; } } //--------------------------------------------------------------------- // multiply rhs() by b_inverse() and copy to rhs //--------------------------------------------------------------------- //binvrhs( lhsX[isize][j][BB], rhs[k][isize][j][k] ); #pragma omp target teams distribute parallel for private(j,k,pivot,coeff) for (k = 1; k <= gp22; k++) { for (j = 1; j <= gp12; j++) { /* for(m = 0; m < 5; m++){ pivot = 1.00/lhsX[m][m][BB][isize][j][k]; for(n = m+1; n < 5; n++){ lhsX[m][n][BB][isize][j][k] = lhsX[m][n][BB][isize][j][k]*pivot; } rhs[k][j][isize][m] = rhs[k][j][isize][m]*pivot; for(n = 0; n < 5; n++){ if(n != m){ coeff = lhsX[n][m][BB][isize][j][k]; for(z = m+1; z < 5; z++){ lhsX[n][z][BB][isize][j][k] = lhsX[n][z][BB][isize][j][k] - coeff*lhsX[m][z][BB][isize][j][k]; } rhs[k][j][isize][n] = rhs[k][j][isize][n] - coeff*rhs[k][j][isize][m]; } } } */ pivot = 1.00/lhsX[0][0][BB][isize][j][k]; lhsX[0][1][BB][isize][j][k] = lhsX[0][1][BB][isize][j][k]*pivot; 
lhsX[0][2][BB][isize][j][k] = lhsX[0][2][BB][isize][j][k]*pivot; lhsX[0][3][BB][isize][j][k] = lhsX[0][3][BB][isize][j][k]*pivot; lhsX[0][4][BB][isize][j][k] = lhsX[0][4][BB][isize][j][k]*pivot; rhs[k][j][isize][0] = rhs[k][j][isize][0] *pivot; coeff = lhsX[1][0][BB][isize][j][k]; lhsX[1][1][BB][isize][j][k]= lhsX[1][1][BB][isize][j][k] - coeff*lhsX[0][1][BB][isize][j][k]; lhsX[1][2][BB][isize][j][k]= lhsX[1][2][BB][isize][j][k] - coeff*lhsX[0][2][BB][isize][j][k]; lhsX[1][3][BB][isize][j][k]= lhsX[1][3][BB][isize][j][k] - coeff*lhsX[0][3][BB][isize][j][k]; lhsX[1][4][BB][isize][j][k]= lhsX[1][4][BB][isize][j][k] - coeff*lhsX[0][4][BB][isize][j][k]; rhs[k][j][isize][1] = rhs[k][j][isize][1] - coeff*rhs[k][j][isize][0]; coeff = lhsX[2][0][BB][isize][j][k]; lhsX[2][1][BB][isize][j][k]= lhsX[2][1][BB][isize][j][k] - coeff*lhsX[0][1][BB][isize][j][k]; lhsX[2][2][BB][isize][j][k]= lhsX[2][2][BB][isize][j][k] - coeff*lhsX[0][2][BB][isize][j][k]; lhsX[2][3][BB][isize][j][k]= lhsX[2][3][BB][isize][j][k] - coeff*lhsX[0][3][BB][isize][j][k]; lhsX[2][4][BB][isize][j][k]= lhsX[2][4][BB][isize][j][k] - coeff*lhsX[0][4][BB][isize][j][k]; rhs[k][j][isize][2] = rhs[k][j][isize][2] - coeff*rhs[k][j][isize][0]; coeff = lhsX[3][0][BB][isize][j][k]; lhsX[3][1][BB][isize][j][k]= lhsX[3][1][BB][isize][j][k] - coeff*lhsX[0][1][BB][isize][j][k]; lhsX[3][2][BB][isize][j][k]= lhsX[3][2][BB][isize][j][k] - coeff*lhsX[0][2][BB][isize][j][k]; lhsX[3][3][BB][isize][j][k]= lhsX[3][3][BB][isize][j][k] - coeff*lhsX[0][3][BB][isize][j][k]; lhsX[3][4][BB][isize][j][k]= lhsX[3][4][BB][isize][j][k] - coeff*lhsX[0][4][BB][isize][j][k]; rhs[k][j][isize][3] = rhs[k][j][isize][3] - coeff*rhs[k][j][isize][0]; coeff = lhsX[4][0][BB][isize][j][k]; lhsX[4][1][BB][isize][j][k]= lhsX[4][1][BB][isize][j][k] - coeff*lhsX[0][1][BB][isize][j][k]; lhsX[4][2][BB][isize][j][k]= lhsX[4][2][BB][isize][j][k] - coeff*lhsX[0][2][BB][isize][j][k]; lhsX[4][3][BB][isize][j][k]= lhsX[4][3][BB][isize][j][k] - 
coeff*lhsX[0][3][BB][isize][j][k]; lhsX[4][4][BB][isize][j][k]= lhsX[4][4][BB][isize][j][k] - coeff*lhsX[0][4][BB][isize][j][k]; rhs[k][j][isize][4] = rhs[k][j][isize][4] - coeff*rhs[k][j][isize][0]; pivot = 1.00/lhsX[1][1][BB][isize][j][k]; lhsX[1][2][BB][isize][j][k] = lhsX[1][2][BB][isize][j][k]*pivot; lhsX[1][3][BB][isize][j][k] = lhsX[1][3][BB][isize][j][k]*pivot; lhsX[1][4][BB][isize][j][k] = lhsX[1][4][BB][isize][j][k]*pivot; rhs[k][j][isize][1] = rhs[k][j][isize][1] *pivot; coeff = lhsX[0][1][BB][isize][j][k]; lhsX[0][2][BB][isize][j][k]= lhsX[0][2][BB][isize][j][k] - coeff*lhsX[1][2][BB][isize][j][k]; lhsX[0][3][BB][isize][j][k]= lhsX[0][3][BB][isize][j][k] - coeff*lhsX[1][3][BB][isize][j][k]; lhsX[0][4][BB][isize][j][k]= lhsX[0][4][BB][isize][j][k] - coeff*lhsX[1][4][BB][isize][j][k]; rhs[k][j][isize][0] = rhs[k][j][isize][0] - coeff*rhs[k][j][isize][1]; coeff = lhsX[2][1][BB][isize][j][k]; lhsX[2][2][BB][isize][j][k]= lhsX[2][2][BB][isize][j][k] - coeff*lhsX[1][2][BB][isize][j][k]; lhsX[2][3][BB][isize][j][k]= lhsX[2][3][BB][isize][j][k] - coeff*lhsX[1][3][BB][isize][j][k]; lhsX[2][4][BB][isize][j][k]= lhsX[2][4][BB][isize][j][k] - coeff*lhsX[1][4][BB][isize][j][k]; rhs[k][j][isize][2] = rhs[k][j][isize][2] - coeff*rhs[k][j][isize][1]; coeff = lhsX[3][1][BB][isize][j][k]; lhsX[3][2][BB][isize][j][k]= lhsX[3][2][BB][isize][j][k] - coeff*lhsX[1][2][BB][isize][j][k]; lhsX[3][3][BB][isize][j][k]= lhsX[3][3][BB][isize][j][k] - coeff*lhsX[1][3][BB][isize][j][k]; lhsX[3][4][BB][isize][j][k]= lhsX[3][4][BB][isize][j][k] - coeff*lhsX[1][4][BB][isize][j][k]; rhs[k][j][isize][3] = rhs[k][j][isize][3] - coeff*rhs[k][j][isize][1]; coeff = lhsX[4][1][BB][isize][j][k]; lhsX[4][2][BB][isize][j][k]= lhsX[4][2][BB][isize][j][k] - coeff*lhsX[1][2][BB][isize][j][k]; lhsX[4][3][BB][isize][j][k]= lhsX[4][3][BB][isize][j][k] - coeff*lhsX[1][3][BB][isize][j][k]; lhsX[4][4][BB][isize][j][k]= lhsX[4][4][BB][isize][j][k] - coeff*lhsX[1][4][BB][isize][j][k]; rhs[k][j][isize][4] = 
rhs[k][j][isize][4] - coeff*rhs[k][j][isize][1]; pivot = 1.00/lhsX[2][2][BB][isize][j][k]; lhsX[2][3][BB][isize][j][k] = lhsX[2][3][BB][isize][j][k]*pivot; lhsX[2][4][BB][isize][j][k] = lhsX[2][4][BB][isize][j][k]*pivot; rhs[k][j][isize][2] = rhs[k][j][isize][2] *pivot; coeff = lhsX[0][2][BB][isize][j][k]; lhsX[0][3][BB][isize][j][k]= lhsX[0][3][BB][isize][j][k] - coeff*lhsX[2][3][BB][isize][j][k]; lhsX[0][4][BB][isize][j][k]= lhsX[0][4][BB][isize][j][k] - coeff*lhsX[2][4][BB][isize][j][k]; rhs[k][j][isize][0] = rhs[k][j][isize][0] - coeff*rhs[k][j][isize][2]; coeff = lhsX[1][2][BB][isize][j][k]; lhsX[1][3][BB][isize][j][k]= lhsX[1][3][BB][isize][j][k] - coeff*lhsX[2][3][BB][isize][j][k]; lhsX[1][4][BB][isize][j][k]= lhsX[1][4][BB][isize][j][k] - coeff*lhsX[2][4][BB][isize][j][k]; rhs[k][j][isize][1] = rhs[k][j][isize][1] - coeff*rhs[k][j][isize][2]; coeff = lhsX[3][2][BB][isize][j][k]; lhsX[3][3][BB][isize][j][k]= lhsX[3][3][BB][isize][j][k] - coeff*lhsX[2][3][BB][isize][j][k]; lhsX[3][4][BB][isize][j][k]= lhsX[3][4][BB][isize][j][k] - coeff*lhsX[2][4][BB][isize][j][k]; rhs[k][j][isize][3] = rhs[k][j][isize][3] - coeff*rhs[k][j][isize][2]; coeff = lhsX[4][2][BB][isize][j][k]; lhsX[4][3][BB][isize][j][k]= lhsX[4][3][BB][isize][j][k] - coeff*lhsX[2][3][BB][isize][j][k]; lhsX[4][4][BB][isize][j][k]= lhsX[4][4][BB][isize][j][k] - coeff*lhsX[2][4][BB][isize][j][k]; rhs[k][j][isize][4] = rhs[k][j][isize][4] - coeff*rhs[k][j][isize][2]; pivot = 1.00/lhsX[3][3][BB][isize][j][k]; lhsX[3][4][BB][isize][j][k] = lhsX[3][4][BB][isize][j][k]*pivot; rhs[k][j][isize][3] = rhs[k][j][isize][3] *pivot; coeff = lhsX[0][3][BB][isize][j][k]; lhsX[0][4][BB][isize][j][k]= lhsX[0][4][BB][isize][j][k] - coeff*lhsX[3][4][BB][isize][j][k]; rhs[k][j][isize][0] = rhs[k][j][isize][0] - coeff*rhs[k][j][isize][3]; coeff = lhsX[1][3][BB][isize][j][k]; lhsX[1][4][BB][isize][j][k]= lhsX[1][4][BB][isize][j][k] - coeff*lhsX[3][4][BB][isize][j][k]; rhs[k][j][isize][1] = rhs[k][j][isize][1] - 
coeff*rhs[k][j][isize][3]; coeff = lhsX[2][3][BB][isize][j][k]; lhsX[2][4][BB][isize][j][k]= lhsX[2][4][BB][isize][j][k] - coeff*lhsX[3][4][BB][isize][j][k]; rhs[k][j][isize][2] = rhs[k][j][isize][2] - coeff*rhs[k][j][isize][3]; coeff = lhsX[4][3][BB][isize][j][k]; lhsX[4][4][BB][isize][j][k]= lhsX[4][4][BB][isize][j][k] - coeff*lhsX[3][4][BB][isize][j][k]; rhs[k][j][isize][4] = rhs[k][j][isize][4] - coeff*rhs[k][j][isize][3]; pivot = 1.00/lhsX[4][4][BB][isize][j][k]; rhs[k][j][isize][4] = rhs[k][j][isize][4] *pivot; coeff = lhsX[0][4][BB][isize][j][k]; rhs[k][j][isize][0] = rhs[k][j][isize][0] - coeff*rhs[k][j][isize][4]; coeff = lhsX[1][4][BB][isize][j][k]; rhs[k][j][isize][1] = rhs[k][j][isize][1] - coeff*rhs[k][j][isize][4]; coeff = lhsX[2][4][BB][isize][j][k]; rhs[k][j][isize][2] = rhs[k][j][isize][2] - coeff*rhs[k][j][isize][4]; coeff = lhsX[3][4][BB][isize][j][k]; rhs[k][j][isize][3] = rhs[k][j][isize][3] - coeff*rhs[k][j][isize][4]; } } //--------------------------------------------------------------------- // back solve: if last cell, then generate U(isize)=rhs(isize) // else assume U(isize) is loaded in un pack backsub_info // so just use it // after u(istart) will be sent to next cell //--------------------------------------------------------------------- #pragma omp target teams distribute parallel for collapse(2) private(i,j,k,m,n) for (k = 1; k <= gp22; k++) { for (j = 1; j <= gp12; j++) { for (i = isize-1; i >=0; i--) { for (m = 0; m < BLOCK_SIZE; m++) { for (n = 0; n < BLOCK_SIZE; n++) { rhs[k][j][i][m] = rhs[k][j][i][m] - lhsX[m][n][CC][i][j][k]*rhs[k][j][i+1][n]; } } } } } }/*end omp target data */ }
// convolution_3x3_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Transform a 3x3 convolution kernel into the Winograd F(6x6, 3x3) domain and
// repack it into pack4 layout for the MSA-vectorized GEMM stage.
//
// kernel          : input weights, laid out as outch x inch contiguous 3x3 tiles
//                   (9 floats per (p, q) pair — see the pointer arithmetic below).
// kernel_tm_pack4 : output; each 3x3 tile becomes an 8x8 = 64-coefficient tile,
//                   interleaved 4 output channels x 4 input channels per row.
// inch, outch     : input / output channel counts. NOTE(review): the interleave
//                   loops step by 4 with no remainder handling, so this path
//                   presumably requires inch and outch to be multiples of 4 —
//                   confirm against the caller's pack4 dispatch.
// opt             : ncnn options; only num_threads is used here.
static void conv3x3s1_winograd64_transform_kernel_pack4_msa(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // G matrix of the Winograd transform: maps a length-3 kernel row/column to
    // the 8 transform-domain points. Applied twice (rows then columns) below.
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            // 3x3 kernel for output channel p, input channel q (row-major, 9 floats)
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h
            // First pass: tmp = G * k, expanding each kernel row to 8 points.
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v
            // Second pass: kernel_tm0 = tmp * G^T, yielding the full 8x8 tile.
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = pb-pa-inch/pa-64-outch/pb
    // Repack so that for each of the 64 transform coefficients, 4 output
    // channels x 4 input channels are stored contiguously for the dot stage.
    kernel_tm_pack4.create(inch / 4, 64, outch / 4, (size_t)4u * 4 * 4, 4 * 4);

    for (int q = 0; q + (4 - 1) < outch; q += 4)
    {
        Mat g0 = kernel_tm_pack4.channel(q / 4);

        for (int k = 0; k < 64; k++)
        {
            float* g00 = g0.row<float>(k);

            for (int p = 0; p + (4 - 1) < inch; p += 4)
            {
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        // coefficient k of the tile for (output q+j, input p+i)
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);

                        g00[0] = (float)k00[k];

                        g00++;
                    }
                }
            }
        }
    }
}

static void conv3x3s1_winograd64_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    const float* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        const int tiles = w_tm / 8 * h_tm / 8;

        // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        bottom_blob_tm.create(tiles, 64, inch, 4u * elempack, elempack, opt.workspace_allocator);

        // const float itm[8][8] = {
        //     {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
        //
        //     {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
        //     {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
        //
        //     {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
        //     {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
        //
        //     {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
        //     {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
        //
        //     {0.0f, -1.0f, 0.00f, 5.25f,
0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8][4]; v4f32 _v5_25 = __msa_fill_w_f32(5.25f); v4f32 _vm4_25 = __msa_fill_w_f32(-4.25f); v4f32 _vm1_25 = __msa_fill_w_f32(-1.25f); v4f32 _v0_25 = __msa_fill_w_f32(0.25f); v4f32 _vm2_5 = __msa_fill_w_f32(-2.5f); v4f32 _v0_5 = __msa_fill_w_f32(0.5f); v4f32 _v2 = __msa_fill_w_f32(2.f); v4f32 _v4 = __msa_fill_w_f32(4.f); // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const float* r0 = img0.row(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0); v4f32 _r05 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0); v4f32 _r06 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0); v4f32 _r07 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0); v4f32 _tmp0m = __msa_fmadd_w(__msa_fsub_w(_r00, _r06), _v5_25, __msa_fsub_w(_r04, _r02)); v4f32 _tmp7m = __msa_fmadd_w(__msa_fsub_w(_r07, _r01), _v5_25, __msa_fsub_w(_r03, _r05)); __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0); __msa_st_w((v4i32)_tmp7m, tmp[7][m], 0); v4f32 _tmp12a = __msa_fmadd_w(__msa_fadd_w(_r02, _r06), _vm4_25, _r04); v4f32 _tmp12b = __msa_fmadd_w(__msa_fadd_w(_r01, _r05), _vm4_25, 
_r03); v4f32 _tmp1m = __msa_fadd_w(_tmp12a, _tmp12b); v4f32 _tmp2m = __msa_fsub_w(_tmp12a, _tmp12b); __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0); __msa_st_w((v4i32)_tmp2m, tmp[2][m], 0); v4f32 _tmp34a = __msa_fmadd_w(__msa_fmadd_w(_r06, _v0_25, _r02), _vm1_25, _r04); v4f32 _tmp34b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_r01, _v0_5), _vm2_5, _r03), _v2, _r05); v4f32 _tmp3m = __msa_fadd_w(_tmp34a, _tmp34b); v4f32 _tmp4m = __msa_fsub_w(_tmp34a, _tmp34b); __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0); __msa_st_w((v4i32)_tmp4m, tmp[4][m], 0); v4f32 _tmp56a = __msa_fmadd_w(_r06, _v4, __msa_fmadd_w(_r02, _vm1_25, _r04)); v4f32 _tmp56b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_r01, _v2), _vm2_5, _r03), _v0_5, _r05); v4f32 _tmp5m = __msa_fadd_w(_tmp56a, _tmp56b); v4f32 _tmp6m = __msa_fsub_w(_tmp56a, _tmp56b); __msa_st_w((v4i32)_tmp5m, tmp[5][m], 0); __msa_st_w((v4i32)_tmp6m, tmp[6][m], 0); r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3; float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4; float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5; float* r0_tm_6 = r0_tm_0 + tiles * 4 * 6; float* r0_tm_7 = r0_tm_0 + tiles * 4 * 7; for (int m = 0; m < 8; m++) { v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0); v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0); v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0); v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0); v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0); v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0); v4f32 _tmp06 = (v4f32)__msa_ld_w(tmp[m][6], 0); v4f32 _tmp07 = (v4f32)__msa_ld_w(tmp[m][7], 0); v4f32 _r0tm0 = __msa_fmadd_w(__msa_fsub_w(_tmp00, _tmp06), _v5_25, __msa_fsub_w(_tmp04, _tmp02)); v4f32 _r0tm7 = __msa_fmadd_w(__msa_fsub_w(_tmp07, _tmp01), _v5_25, __msa_fsub_w(_tmp03, _tmp05)); v4f32 _tmp12a = __msa_fmadd_w(__msa_fadd_w(_tmp02, _tmp06), _vm4_25, _tmp04); v4f32 _tmp12b = __msa_fmadd_w(__msa_fadd_w(_tmp01, 
_tmp05), _vm4_25, _tmp03); v4f32 _r0tm1 = __msa_fadd_w(_tmp12a, _tmp12b); v4f32 _r0tm2 = __msa_fsub_w(_tmp12a, _tmp12b); v4f32 _tmp34a = __msa_fmadd_w(__msa_fmadd_w(_tmp06, _v0_25, _tmp02), _vm1_25, _tmp04); v4f32 _tmp34b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_tmp01, _v0_5), _vm2_5, _tmp03), _v2, _tmp05); v4f32 _r0tm3 = __msa_fadd_w(_tmp34a, _tmp34b); v4f32 _r0tm4 = __msa_fsub_w(_tmp34a, _tmp34b); v4f32 _tmp56a = __msa_fmadd_w(_tmp06, _v4, __msa_fmadd_w(_tmp02, _vm1_25, _tmp04)); v4f32 _tmp56b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_tmp01, _v2), _vm2_5, _tmp03), _v0_5, _tmp05); v4f32 _r0tm5 = __msa_fadd_w(_tmp56a, _tmp56b); v4f32 _r0tm6 = __msa_fsub_w(_tmp56a, _tmp56b); __msa_st_w((v4i32)_r0tm0, r0_tm_0, 0); __msa_st_w((v4i32)_r0tm1, r0_tm_1, 0); __msa_st_w((v4i32)_r0tm2, r0_tm_2, 0); __msa_st_w((v4i32)_r0tm3, r0_tm_3, 0); __msa_st_w((v4i32)_r0tm4, r0_tm_4, 0); __msa_st_w((v4i32)_r0tm5, r0_tm_5, 0); __msa_st_w((v4i32)_r0tm6, r0_tm_6, 0); __msa_st_w((v4i32)_r0tm7, r0_tm_7, 0); r0_tm_0 += tiles * 4 * 8; r0_tm_1 += tiles * 4 * 8; r0_tm_2 += tiles * 4 * 8; r0_tm_3 += tiles * 4 * 8; r0_tm_4 += tiles * 4 * 8; r0_tm_5 += tiles * 4 * 8; r0_tm_6 += tiles * 4 * 8; r0_tm_7 += tiles * 4 * 8; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, 
elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { float* tmpptr = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x8 v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0); v4f32 _r5 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0); v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0); v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0); v4f32 _r8 = (v4f32)__msa_ld_w(r0 + 4 * 8, 0); v4f32 _r9 = (v4f32)__msa_ld_w(r0 + 4 * 9, 0); v4f32 _ra = (v4f32)__msa_ld_w(r0 + 4 * 10, 0); v4f32 _rb = (v4f32)__msa_ld_w(r0 + 4 * 11, 0); v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2); v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2); v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4); v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4); v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6); v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6); v4i32 _r89r = __msa_ilvr_w((v4i32)_r9, (v4i32)_r8); v4i32 _r89l = __msa_ilvl_w((v4i32)_r9, (v4i32)_r8); v4i32 _rabr = __msa_ilvr_w((v4i32)_rb, (v4i32)_ra); v4i32 _rabl = __msa_ilvl_w((v4i32)_rb, (v4i32)_ra); v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, 
(v2i64)_r01l); v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l); v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l); v2i64 _r89ab_0 = __msa_ilvr_d((v2i64)_rabr, (v2i64)_r89r); v2i64 _r89ab_1 = __msa_ilvl_d((v2i64)_rabr, (v2i64)_r89r); v2i64 _r89ab_2 = __msa_ilvr_d((v2i64)_rabl, (v2i64)_r89l); v2i64 _r89ab_3 = __msa_ilvl_d((v2i64)_rabl, (v2i64)_r89l); __msa_st_w((v4i32)_r0123_0, tmpptr, 0); __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0); __msa_st_w((v4i32)_r89ab_0, tmpptr + 4 * 2, 0); __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 3, 0); __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 4, 0); __msa_st_w((v4i32)_r89ab_1, tmpptr + 4 * 5, 0); __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 6, 0); __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 7, 0); __msa_st_w((v4i32)_r89ab_2, tmpptr + 4 * 8, 0); __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 9, 0); __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 10, 0); __msa_st_w((v4i32)_r89ab_3, tmpptr + 4 * 11, 0); r0 += bottom_blob_tm.cstep * 4; tmpptr += 48; } } for (; i + 7 < tiles; i += 8) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x8 v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0); v4f32 _r5 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0); v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0); v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0); v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2); v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2); v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4); v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4); v4i32 _r67r = 
__msa_ilvr_w((v4i32)_r7, (v4i32)_r6); v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6); v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l); v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l); __msa_st_w((v4i32)_r0123_0, tmpptr, 0); __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0); __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 2, 0); __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 3, 0); __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 4, 0); __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 5, 0); __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 6, 0); __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 7, 0); r0 += bottom_blob_tm.cstep * 4; tmpptr += 32; } } for (; i + 3 < tiles; i += 4) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x4 v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2); v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2); v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l); __msa_st_w((v4i32)_r0123_0, tmpptr, 0); __msa_st_w((v4i32)_r0123_1, tmpptr + 4, 0); __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 2, 0); __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 3, 0); r0 += 
bottom_blob_tm.cstep * 4; tmpptr += 16; } } for (; i + 1 < tiles; i += 2) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x2 v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0); v4i32 _r01_0 = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01_1 = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); __msa_st_w((v4i32)_r01_0, tmpptr, 0); __msa_st_w((v4i32)_r01_1, tmpptr + 4, 0); r0 += bottom_blob_tm.cstep * 4; tmpptr += 8; } } for (; i < tiles; i++) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { v4f32 _val = (v4f32)__msa_ld_w(r0, 0); __msa_st_w((v4i32)_val, tmpptr, 0); r0 += bottom_blob_tm.cstep * 4; tmpptr += 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 v4f32 _sum0 = (v4f32)__msa_fill_w(0); v4f32 _sum1 = (v4f32)__msa_fill_w(0); v4f32 _sum2 = (v4f32)__msa_fill_w(0); v4f32 _sum3 = (v4f32)__msa_fill_w(0); v4f32 _sum4 = (v4f32)__msa_fill_w(0); v4f32 _sum5 = (v4f32)__msa_fill_w(0); v4f32 _sum6 = (v4f32)__msa_fill_w(0); v4f32 _sum7 = (v4f32)__msa_fill_w(0); v4f32 _sum8 = (v4f32)__msa_fill_w(0); v4f32 _sum9 = (v4f32)__msa_fill_w(0); v4f32 _suma = (v4f32)__msa_fill_w(0); v4f32 _sumb = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 96); 
__builtin_prefetch(k0 + 32); v4i32 _val0123 = __msa_ld_w(r0, 0); v4i32 _val4567 = __msa_ld_w(r0 + 4, 0); v4i32 _val89ab = __msa_ld_w(r0 + 8, 0); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0); _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0); _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0); _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0); _sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0); _sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0); _sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0); _sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0); _sum8 = __msa_fmadd_w(_sum8, (v4f32)__msa_splati_w(_val89ab, 0), _w0); _sum9 = __msa_fmadd_w(_sum9, (v4f32)__msa_splati_w(_val89ab, 1), _w0); _suma = __msa_fmadd_w(_suma, (v4f32)__msa_splati_w(_val89ab, 2), _w0); _sumb = __msa_fmadd_w(_sumb, (v4f32)__msa_splati_w(_val89ab, 3), _w0); r0 += 12; k0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); __msa_st_w((v4i32)_sum1, output0_tm + 4, 0); __msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0); __msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0); __msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0); __msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0); __msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0); __msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0); __msa_st_w((v4i32)_sum8, output0_tm + 4 * 8, 0); __msa_st_w((v4i32)_sum9, output0_tm + 4 * 9, 0); __msa_st_w((v4i32)_suma, output0_tm + 4 * 10, 0); __msa_st_w((v4i32)_sumb, output0_tm + 4 * 11, 0); output0_tm += 4 * 12; } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 v4f32 _sum0 = (v4f32)__msa_fill_w(0); v4f32 _sum1 = (v4f32)__msa_fill_w(0); v4f32 _sum2 = (v4f32)__msa_fill_w(0); v4f32 _sum3 = (v4f32)__msa_fill_w(0); v4f32 _sum4 = 
(v4f32)__msa_fill_w(0); v4f32 _sum5 = (v4f32)__msa_fill_w(0); v4f32 _sum6 = (v4f32)__msa_fill_w(0); v4f32 _sum7 = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 64); __builtin_prefetch(k0 + 32); v4i32 _val0123 = __msa_ld_w(r0, 0); v4i32 _val4567 = __msa_ld_w(r0 + 4, 0); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0); _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0); _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0); _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0); _sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0); _sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0); _sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0); _sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0); r0 += 8; k0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); __msa_st_w((v4i32)_sum1, output0_tm + 4, 0); __msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0); __msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0); __msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0); __msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0); __msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0); __msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0); output0_tm += 4 * 8; } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 v4f32 _sum0 = (v4f32)__msa_fill_w(0); v4f32 _sum1 = (v4f32)__msa_fill_w(0); v4f32 _sum2 = (v4f32)__msa_fill_w(0); v4f32 _sum3 = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 32); __builtin_prefetch(k0 + 32); v4i32 _val0123 = __msa_ld_w(r0, 0); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0); _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0); _sum2 = __msa_fmadd_w(_sum2, 
(v4f32)__msa_splati_w(_val0123, 2), _w0); _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0); r0 += 4; k0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); __msa_st_w((v4i32)_sum1, output0_tm + 4, 0); __msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0); __msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0); output0_tm += 4 * 4; } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 v4f32 _sum0 = (v4f32)__msa_fill_w(0); v4f32 _sum1 = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 16); __builtin_prefetch(k0 + 32); v4f32 _val0 = __msa_fill_w_f32(*r0++); v4f32 _val1 = __msa_fill_w_f32(*r0++); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum0 = __msa_fmadd_w(_sum0, _val0, _w0); _sum1 = __msa_fmadd_w(_sum1, _val1, _w0); k0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); __msa_st_w((v4i32)_sum1, output0_tm + 4, 0); output0_tm += 4 * 2; } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 v4f32 _sum = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 8); __builtin_prefetch(k0 + 32); v4f32 _val0 = __msa_fill_w_f32(*r0++); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum = __msa_fmadd_w(_sum, _val0, _w0); k0 += 4; } __msa_st_w((v4i32)_sum, output0_tm, 0); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, 
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; v4f32 _bias0 = bias ? (v4f32)__msa_ld_w((const float*)bias + p * 4, 0) : (v4f32)__msa_fill_w(0); float tmp[6][8][4]; v4f32 _v32 = __msa_fill_w_f32(32.f); v4f32 _v16 = __msa_fill_w_f32(16.f); v4f32 _v8 = __msa_fill_w_f32(8.f); v4f32 _v4 = __msa_fill_w_f32(4.f); v4f32 _v2 = __msa_fill_w_f32(2.f); // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5; const float* output0_tm_6 = output0_tm_0 + tiles * 4 * 6; const float* output0_tm_7 = output0_tm_0 + tiles * 4 * 7; float* output0 = out0.row<float>(i * 6) + (j * 6) * 4; // TODO msa optimize for (int m = 0; m < 8; m++) { v4f32 _out0tm0 = (v4f32)__msa_ld_w(output0_tm_0, 0); v4f32 _out0tm1 = (v4f32)__msa_ld_w(output0_tm_1, 0); v4f32 _out0tm2 = (v4f32)__msa_ld_w(output0_tm_2, 0); v4f32 _out0tm3 = 
(v4f32)__msa_ld_w(output0_tm_3, 0); v4f32 _out0tm4 = (v4f32)__msa_ld_w(output0_tm_4, 0); v4f32 _out0tm5 = (v4f32)__msa_ld_w(output0_tm_5, 0); v4f32 _out0tm6 = (v4f32)__msa_ld_w(output0_tm_6, 0); v4f32 _out0tm7 = (v4f32)__msa_ld_w(output0_tm_7, 0); v4f32 _tmp024a = __msa_fadd_w(_out0tm1, _out0tm2); v4f32 _tmp135a = __msa_fsub_w(_out0tm1, _out0tm2); v4f32 _tmp024b = __msa_fadd_w(_out0tm3, _out0tm4); v4f32 _tmp135b = __msa_fsub_w(_out0tm3, _out0tm4); v4f32 _tmp024c = __msa_fadd_w(_out0tm5, _out0tm6); v4f32 _tmp135c = __msa_fsub_w(_out0tm5, _out0tm6); v4f32 _tmp0m = __msa_fadd_w(__msa_fadd_w(_out0tm0, _tmp024a), __msa_fmadd_w(_tmp024b, _v32, _tmp024c)); v4f32 _tmp2m = __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v4, _tmp024b), _v8, _tmp024c); v4f32 _tmp4m = __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v16, _tmp024b), _v2, _tmp024c); __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0); __msa_st_w((v4i32)_tmp2m, tmp[2][m], 0); __msa_st_w((v4i32)_tmp4m, tmp[4][m], 0); v4f32 _tmp1m = __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v2, _tmp135b), _v16, _tmp135c); v4f32 _tmp3m = __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v8, _tmp135b), _v4, _tmp135c); v4f32 _tmp5m = __msa_fadd_w(__msa_fadd_w(_out0tm7, _tmp135a), __msa_fmadd_w(_tmp135c, _v32, _tmp135b)); __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0); __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0); __msa_st_w((v4i32)_tmp5m, tmp[5][m], 0); output0_tm_0 += tiles * 4 * 8; output0_tm_1 += tiles * 4 * 8; output0_tm_2 += tiles * 4 * 8; output0_tm_3 += tiles * 4 * 8; output0_tm_4 += tiles * 4 * 8; output0_tm_5 += tiles * 4 * 8; output0_tm_6 += tiles * 4 * 8; output0_tm_7 += tiles * 4 * 8; } for (int m = 0; m < 6; m++) { v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0); v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0); v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0); v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0); v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0); v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0); v4f32 _tmp06 = (v4f32)__msa_ld_w(tmp[m][6], 0); v4f32 _tmp07 = 
(v4f32)__msa_ld_w(tmp[m][7], 0); v4f32 _tmp024a = __msa_fadd_w(_tmp01, _tmp02); v4f32 _tmp135a = __msa_fsub_w(_tmp01, _tmp02); v4f32 _tmp024b = __msa_fadd_w(_tmp03, _tmp04); v4f32 _tmp135b = __msa_fsub_w(_tmp03, _tmp04); v4f32 _tmp024c = __msa_fadd_w(_tmp05, _tmp06); v4f32 _tmp135c = __msa_fsub_w(_tmp05, _tmp06); v4f32 _out00 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp00, _tmp024a), __msa_fmadd_w(_tmp024b, _v32, _tmp024c))); v4f32 _out02 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v4, _tmp024b), _v8, _tmp024c)); v4f32 _out04 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v16, _tmp024b), _v2, _tmp024c)); __msa_st_w((v4i32)_out00, output0, 0); __msa_st_w((v4i32)_out02, output0 + 4 * 2, 0); __msa_st_w((v4i32)_out04, output0 + 4 * 4, 0); v4f32 _out01 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v2, _tmp135b), _v16, _tmp135c)); v4f32 _out03 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v8, _tmp135b), _v4, _tmp135c)); v4f32 _out05 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp07, _tmp135a), __msa_fmadd_w(_tmp135c, _v32, _tmp135b))); __msa_st_w((v4i32)_out01, output0 + 4, 0); __msa_st_w((v4i32)_out03, output0 + 4 * 3, 0); __msa_st_w((v4i32)_out05, output0 + 4 * 5, 0); output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_winograd42_transform_kernel_pack4_msa(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt) { // winograd42 transform kernel Mat kernel_tm(6 * 6, inch, outch); const float ktm[6][3] = { {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6}, {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; 
q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[6][3]; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 36-inch-outch // dst = pb-pa-inch/pa-36-outch/pb kernel_tm_pack4.create(inch / 4, 36, outch / 4, (size_t)4u * 4 * 4, 4 * 4); for (int q = 0; q + (4 - 1) < outch; q += 4) { Mat g0 = kernel_tm_pack4.channel(q / 4); for (int k = 0; k < 36; k++) { float* g00 = g0.row<float>(k); for (int p = 0; p + (4 - 1) < inch; p += 4) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { const float* k00 = kernel_tm.channel(q + j).row(p + i); g00[0] = (float)k00[k]; g00++; } } } } } } static void conv3x3s1_winograd42_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 4n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; 
bottom_blob_tm.create(tiles, 36, inch, 4u * elempack, elempack, opt.workspace_allocator); // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[6][6][4]; v4f32 _vm5 = __msa_fill_w_f32(-5.f); v4f32 _vm4 = __msa_fill_w_f32(-4.f); v4f32 _v4 = __msa_fill_w_f32(4.f); v4f32 _vm2 = __msa_fill_w_f32(-2.f); v4f32 _v2 = __msa_fill_w_f32(2.f); // tile for (int i = 0; i < h_tm / 6; i++) { for (int j = 0; j < w_tm / 6; j++) { const float* r0 = img0.row(i * 4) + (j * 4) * 4; for (int m = 0; m < 6; m++) { v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0); v4f32 _r05 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0); v4f32 _tmp0m = __msa_fmadd_w(__msa_fmadd_w(_r04, _v4, _r00), _vm5, _r02); v4f32 _tmp1m = __msa_fmadd_w(__msa_fadd_w(_r04, _r03), _vm4, __msa_fadd_w(_r01, _r02)); v4f32 _tmp2m = __msa_fmadd_w(__msa_fsub_w(_r04, _r03), _v4, __msa_fsub_w(_r01, _r02)); v4f32 _tmp3m = __msa_fmadd_w(__msa_fsub_w(_r04, _r02), _vm2, __msa_fsub_w(_r01, _r03)); v4f32 _tmp4m = __msa_fmadd_w(__msa_fsub_w(_r04, _r02), _v2, __msa_fsub_w(_r01, _r03)); v4f32 _tmp5m = __msa_fmadd_w(__msa_fmadd_w(_r05, _v4, _r01), _vm5, _r03); __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0); __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0); __msa_st_w((v4i32)_tmp2m, 
tmp[2][m], 0); __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0); __msa_st_w((v4i32)_tmp4m, tmp[4][m], 0); __msa_st_w((v4i32)_tmp5m, tmp[5][m], 0); r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 6 + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3; float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4; float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5; for (int m = 0; m < 6; m++) { v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0); v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0); v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0); v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0); v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0); v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0); v4f32 _r0tm0 = __msa_fmadd_w(__msa_fmadd_w(_tmp04, _v4, _tmp00), _vm5, _tmp02); v4f32 _r0tm1 = __msa_fmadd_w(__msa_fadd_w(_tmp04, _tmp03), _vm4, __msa_fadd_w(_tmp01, _tmp02)); v4f32 _r0tm2 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp03), _v4, __msa_fsub_w(_tmp01, _tmp02)); v4f32 _r0tm3 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp02), _vm2, __msa_fsub_w(_tmp01, _tmp03)); v4f32 _r0tm4 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp02), _v2, __msa_fsub_w(_tmp01, _tmp03)); v4f32 _r0tm5 = __msa_fmadd_w(__msa_fmadd_w(_tmp05, _v4, _tmp01), _vm5, _tmp03); __msa_st_w((v4i32)_r0tm0, r0_tm_0, 0); __msa_st_w((v4i32)_r0tm1, r0_tm_1, 0); __msa_st_w((v4i32)_r0tm2, r0_tm_2, 0); __msa_st_w((v4i32)_r0tm3, r0_tm_3, 0); __msa_st_w((v4i32)_r0tm4, r0_tm_4, 0); __msa_st_w((v4i32)_r0tm5, r0_tm_5, 0); r0_tm_0 += tiles * 4 * 6; r0_tm_1 += tiles * 4 * 6; r0_tm_2 += tiles * 4 * 6; r0_tm_3 += tiles * 4 * 6; r0_tm_4 += tiles * 4 * 6; r0_tm_5 += tiles * 4 * 6; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 12) 
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { float* tmpptr = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x8 v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0); v4f32 _r5 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0); v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0); v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0); v4f32 _r8 = (v4f32)__msa_ld_w(r0 + 4 * 8, 0); v4f32 _r9 = (v4f32)__msa_ld_w(r0 + 4 * 9, 0); v4f32 _ra = (v4f32)__msa_ld_w(r0 + 4 * 10, 0); v4f32 _rb = (v4f32)__msa_ld_w(r0 + 4 * 11, 0); v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2); v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2); v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4); v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4); v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6); v4i32 _r67l = 
__msa_ilvl_w((v4i32)_r7, (v4i32)_r6); v4i32 _r89r = __msa_ilvr_w((v4i32)_r9, (v4i32)_r8); v4i32 _r89l = __msa_ilvl_w((v4i32)_r9, (v4i32)_r8); v4i32 _rabr = __msa_ilvr_w((v4i32)_rb, (v4i32)_ra); v4i32 _rabl = __msa_ilvl_w((v4i32)_rb, (v4i32)_ra); v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l); v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l); v2i64 _r89ab_0 = __msa_ilvr_d((v2i64)_rabr, (v2i64)_r89r); v2i64 _r89ab_1 = __msa_ilvl_d((v2i64)_rabr, (v2i64)_r89r); v2i64 _r89ab_2 = __msa_ilvr_d((v2i64)_rabl, (v2i64)_r89l); v2i64 _r89ab_3 = __msa_ilvl_d((v2i64)_rabl, (v2i64)_r89l); __msa_st_w((v4i32)_r0123_0, tmpptr, 0); __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0); __msa_st_w((v4i32)_r89ab_0, tmpptr + 4 * 2, 0); __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 3, 0); __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 4, 0); __msa_st_w((v4i32)_r89ab_1, tmpptr + 4 * 5, 0); __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 6, 0); __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 7, 0); __msa_st_w((v4i32)_r89ab_2, tmpptr + 4 * 8, 0); __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 9, 0); __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 10, 0); __msa_st_w((v4i32)_r89ab_3, tmpptr + 4 * 11, 0); r0 += bottom_blob_tm.cstep * 4; tmpptr += 48; } } for (; i + 7 < tiles; i += 8) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x8 v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0); v4f32 _r5 = 
(v4f32)__msa_ld_w(r0 + 4 * 5, 0); v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0); v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0); v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2); v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2); v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4); v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4); v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6); v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6); v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l); v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l); __msa_st_w((v4i32)_r0123_0, tmpptr, 0); __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0); __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 2, 0); __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 3, 0); __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 4, 0); __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 5, 0); __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 6, 0); __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 7, 0); r0 += bottom_blob_tm.cstep * 4; tmpptr += 32; } } for (; i + 3 < tiles; i += 4) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x4 v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2); v4i32 _r23l = 
__msa_ilvl_w((v4i32)_r3, (v4i32)_r2); v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l); __msa_st_w((v4i32)_r0123_0, tmpptr, 0); __msa_st_w((v4i32)_r0123_1, tmpptr + 4, 0); __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 2, 0); __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 3, 0); r0 += bottom_blob_tm.cstep * 4; tmpptr += 16; } } for (; i + 1 < tiles; i += 2) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x2 v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0); v4i32 _r01_0 = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01_1 = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); __msa_st_w((v4i32)_r01_0, tmpptr, 0); __msa_st_w((v4i32)_r01_1, tmpptr + 4, 0); r0 += bottom_blob_tm.cstep * 4; tmpptr += 8; } } for (; i < tiles; i++) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { v4f32 _val = (v4f32)__msa_ld_w(r0, 0); __msa_st_w((v4i32)_val, tmpptr, 0); r0 += bottom_blob_tm.cstep * 4; tmpptr += 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 v4f32 _sum0 = (v4f32)__msa_fill_w(0); v4f32 _sum1 = 
(v4f32)__msa_fill_w(0); v4f32 _sum2 = (v4f32)__msa_fill_w(0); v4f32 _sum3 = (v4f32)__msa_fill_w(0); v4f32 _sum4 = (v4f32)__msa_fill_w(0); v4f32 _sum5 = (v4f32)__msa_fill_w(0); v4f32 _sum6 = (v4f32)__msa_fill_w(0); v4f32 _sum7 = (v4f32)__msa_fill_w(0); v4f32 _sum8 = (v4f32)__msa_fill_w(0); v4f32 _sum9 = (v4f32)__msa_fill_w(0); v4f32 _suma = (v4f32)__msa_fill_w(0); v4f32 _sumb = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 96); __builtin_prefetch(k0 + 32); v4i32 _val0123 = __msa_ld_w(r0, 0); v4i32 _val4567 = __msa_ld_w(r0 + 4, 0); v4i32 _val89ab = __msa_ld_w(r0 + 8, 0); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0); _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0); _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0); _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0); _sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0); _sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0); _sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0); _sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0); _sum8 = __msa_fmadd_w(_sum8, (v4f32)__msa_splati_w(_val89ab, 0), _w0); _sum9 = __msa_fmadd_w(_sum9, (v4f32)__msa_splati_w(_val89ab, 1), _w0); _suma = __msa_fmadd_w(_suma, (v4f32)__msa_splati_w(_val89ab, 2), _w0); _sumb = __msa_fmadd_w(_sumb, (v4f32)__msa_splati_w(_val89ab, 3), _w0); r0 += 12; k0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); __msa_st_w((v4i32)_sum1, output0_tm + 4, 0); __msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0); __msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0); __msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0); __msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0); __msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0); __msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0); __msa_st_w((v4i32)_sum8, output0_tm + 4 * 8, 0); __msa_st_w((v4i32)_sum9, output0_tm + 4 * 9, 0); 
__msa_st_w((v4i32)_suma, output0_tm + 4 * 10, 0); __msa_st_w((v4i32)_sumb, output0_tm + 4 * 11, 0); output0_tm += 4 * 12; } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 v4f32 _sum0 = (v4f32)__msa_fill_w(0); v4f32 _sum1 = (v4f32)__msa_fill_w(0); v4f32 _sum2 = (v4f32)__msa_fill_w(0); v4f32 _sum3 = (v4f32)__msa_fill_w(0); v4f32 _sum4 = (v4f32)__msa_fill_w(0); v4f32 _sum5 = (v4f32)__msa_fill_w(0); v4f32 _sum6 = (v4f32)__msa_fill_w(0); v4f32 _sum7 = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 64); __builtin_prefetch(k0 + 32); v4i32 _val0123 = __msa_ld_w(r0, 0); v4i32 _val4567 = __msa_ld_w(r0 + 4, 0); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0); _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0); _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0); _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0); _sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0); _sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0); _sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0); _sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0); r0 += 8; k0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); __msa_st_w((v4i32)_sum1, output0_tm + 4, 0); __msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0); __msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0); __msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0); __msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0); __msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0); __msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0); output0_tm += 4 * 8; } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 v4f32 _sum0 = (v4f32)__msa_fill_w(0); v4f32 
_sum1 = (v4f32)__msa_fill_w(0); v4f32 _sum2 = (v4f32)__msa_fill_w(0); v4f32 _sum3 = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 32); __builtin_prefetch(k0 + 32); v4i32 _val0123 = __msa_ld_w(r0, 0); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0); _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0); _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0); _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0); r0 += 4; k0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); __msa_st_w((v4i32)_sum1, output0_tm + 4, 0); __msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0); __msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0); output0_tm += 4 * 4; } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 v4f32 _sum0 = (v4f32)__msa_fill_w(0); v4f32 _sum1 = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 16); __builtin_prefetch(k0 + 32); v4f32 _val0 = __msa_fill_w_f32(*r0++); v4f32 _val1 = __msa_fill_w_f32(*r0++); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum0 = __msa_fmadd_w(_sum0, _val0, _w0); _sum1 = __msa_fmadd_w(_sum1, _val1, _w0); k0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); __msa_st_w((v4i32)_sum1, output0_tm + 4, 0); output0_tm += 4 * 2; } for (; i < tiles; i++) { const float* r0 = bb2.row<const float>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k0 = kernel0_tm.row<const float>(r); int nn = inch * 4; // inch always > 0 v4f32 _sum = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 8); __builtin_prefetch(k0 + 32); v4f32 _val0 = __msa_fill_w_f32(*r0++); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum = __msa_fmadd_w(_sum, _val0, _w0); k0 += 4; } __msa_st_w((v4i32)_sum, output0_tm, 0); output0_tm += 4; 
} } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; v4f32 _bias0 = bias ? (v4f32)__msa_ld_w((const float*)bias + p * 4, 0) : (v4f32)__msa_fill_w(0); float tmp[4][6][4]; v4f32 _v2 = __msa_fill_w_f32(2.f); v4f32 _v4 = __msa_fill_w_f32(4.f); v4f32 _v8 = __msa_fill_w_f32(8.f); // tile for (int i = 0; i < outh / 4; i++) { for (int j = 0; j < outw / 4; j++) { // top_blob_tm.create(tiles, 36, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 6 + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5; float* output0 = out0.row<float>(i * 4) + (j * 4) * 4; // TODO msa optimize for (int m = 0; m < 6; m++) { v4f32 _out0tm0 = (v4f32)__msa_ld_w(output0_tm_0, 0); v4f32 _out0tm1 = (v4f32)__msa_ld_w(output0_tm_1, 0); v4f32 _out0tm2 = (v4f32)__msa_ld_w(output0_tm_2, 0); v4f32 _out0tm3 = (v4f32)__msa_ld_w(output0_tm_3, 0); v4f32 _out0tm4 = 
(v4f32)__msa_ld_w(output0_tm_4, 0); v4f32 _out0tm5 = (v4f32)__msa_ld_w(output0_tm_5, 0); v4f32 _tmp02a = __msa_fadd_w(_out0tm1, _out0tm2); v4f32 _tmp13a = __msa_fsub_w(_out0tm1, _out0tm2); v4f32 _tmp02b = __msa_fadd_w(_out0tm3, _out0tm4); v4f32 _tmp13b = __msa_fsub_w(_out0tm3, _out0tm4); v4f32 _tmp0m = __msa_fadd_w(__msa_fadd_w(_out0tm0, _tmp02a), _tmp02b); v4f32 _tmp1m = __msa_fmadd_w(_tmp13a, _v2, _tmp13b); v4f32 _tmp2m = __msa_fmadd_w(_tmp02a, _v4, _tmp02b); v4f32 _tmp3m = __msa_fmadd_w(__msa_fadd_w(_out0tm5, _tmp13a), _v8, _tmp13b); __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0); __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0); __msa_st_w((v4i32)_tmp2m, tmp[2][m], 0); __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0); output0_tm_0 += tiles * 4 * 6; output0_tm_1 += tiles * 4 * 6; output0_tm_2 += tiles * 4 * 6; output0_tm_3 += tiles * 4 * 6; output0_tm_4 += tiles * 4 * 6; output0_tm_5 += tiles * 4 * 6; } for (int m = 0; m < 4; m++) { v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0); v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0); v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0); v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0); v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0); v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0); v4f32 _tmp02a = __msa_fadd_w(_tmp01, _tmp02); v4f32 _tmp13a = __msa_fsub_w(_tmp01, _tmp02); v4f32 _tmp02b = __msa_fadd_w(_tmp03, _tmp04); v4f32 _tmp13b = __msa_fsub_w(_tmp03, _tmp04); v4f32 _out00 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp00, _tmp02a), _tmp02b)); v4f32 _out01 = __msa_fadd_w(_bias0, __msa_fmadd_w(_tmp13a, _v2, _tmp13b)); v4f32 _out02 = __msa_fadd_w(_bias0, __msa_fmadd_w(_tmp02a, _v4, _tmp02b)); v4f32 _out03 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fadd_w(_tmp05, _tmp13a), _v8, _tmp13b)); __msa_st_w((v4i32)_out00, output0, 0); __msa_st_w((v4i32)_out01, output0 + 4, 0); __msa_st_w((v4i32)_out02, output0 + 4 * 2, 0); __msa_st_w((v4i32)_out03, output0 + 4 * 3, 0); output0 += outw * 4; } } } } } // END transform output // cut result pad 
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
bucle-forModificado.c
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#ifdef _OPENMP
#include <omp.h>
#else
/* Serial fallbacks so the program also builds without OpenMP support. */
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#endif

/*
 * Reports which OpenMP thread executes each iteration of a parallel loop.
 *
 * argv[1]: number of loop iterations (non-negative integer).
 * Returns 0 on success; exits with status -1 on a missing or invalid argument.
 */
int main(int argc, char **argv)
{
    if (argc < 2) {
        fprintf(stderr, "\n[ERROR] - Falta nº iteraciones \n");
        exit(-1);
    }

    /* strtol instead of atoi: atoi silently yields 0 on garbage input and
     * gives no way to detect errors (CERT ERR34-C).  Also rejects negative
     * counts and values that do not fit in an int. */
    char *end = NULL;
    long val = strtol(argv[1], &end, 10);
    if (end == argv[1] || *end != '\0' || val < 0 || val > INT_MAX) {
        fprintf(stderr, "\n[ERROR] - Falta nº iteraciones \n");
        exit(-1);
    }
    int n = (int)val;

#ifdef _OPENMP
    /* Loop variable of a parallel-for is implicitly private per thread. */
    #pragma omp parallel for
#endif
    for (int i = 0; i < n; i++)
        printf("thread %d ejecuta la iteración %d del bucle\n",
               omp_get_thread_num(), i);

    return 0;
}
GB_unaryop__identity_uint64_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__identity_uint64_fp32
// op(A') function: GB_tran__identity_uint64_fp32

// C type: uint64_t
// A type: float
// cast: uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64)
// unaryop: cij = aij

// type of A's entries
#define GB_ATYPE \
    float

// type of C's entries
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// Cx [p]: access the p-th entry of C
#define GB_CX(p) Cx [p]

// unary operator (identity: pass the casted value through unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting: float -> uint64_t via the library's checked unsigned cast
#define GB_CASTING(z, x) \
    uint64_t z ; GB_CAST_UNSIGNED(z,x,64) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply: each iteration is independent, so the loop is
// parallelized with a static schedule over nthreads threads.
GrB_Info GB_unop__identity_uint64_fp32
(
    uint64_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose kernel lives in GB_unaryop_transpose.c and is
// specialized here through the GB_* macros defined above.
GrB_Info GB_tran__identity_uint64_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
reductionOMP.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Adds a and b inside each OpenMP thread and reports which thread ran. */
int Suma(int a, int b);

int sumaglobal = 0; /* NOTE(review): never read or written — kept for ABI compatibility */

/*
 * Spawns the requested number of threads; each thread computes Suma(a, b)
 * and the per-thread results are combined with an OpenMP '+' reduction.
 *
 * Usage: program <number of threads>
 * Returns 0 on success, EXIT_FAILURE on missing/invalid argument.
 */
int main(int argc, char *argv[])
{
    /* BUG FIX: argv[1] was dereferenced without checking argc. */
    if (argc < 2) {
        fprintf(stderr, "Uso: %s <numero de hilos>\n", argv[0]);
        return EXIT_FAILURE;
    }

    /* BUG FIX: reject non-numeric or non-positive thread counts, which
       would otherwise be handed straight to num_threads(). */
    int numeroDeHilos = (int)strtol(argv[1], NULL, 10);
    if (numeroDeHilos <= 0) {
        fprintf(stderr, "Numero de hilos no valido: %s\n", argv[1]);
        return EXIT_FAILURE;
    }

    int resultado = 0;
    int a = 10, b = 20;

#pragma omp parallel num_threads(numeroDeHilos) \
    reduction(+ : resultado)
    resultado += Suma(a, b);

    printf("El resultado de la operación es %d\n", resultado);
    return 0;
}

int Suma(int a, int b)
{
    int my_rank = omp_get_thread_num();
    int numeroDeHilos = omp_get_num_threads();
    int mi_suma = a + b;
    printf("Suma del hilo %d de %d: %d\n", my_rank, numeroDeHilos, mi_suma);
    return mi_suma;
}
flow_cutter.h
#ifndef FLOW_CUTTER_H
#define FLOW_CUTTER_H

#include "tiny_id_func.h"
#include "array_id_func.h"
#include "id_string.h"
#include "id_func.h"
#include "dijkstra.h"
#include "min_max.h"
#include <vector>
#include <algorithm>
#include <sstream>
#include <random>
#include <memory>
#include <omp.h>
#include "flow_cutter_config.h"
#include <iostream>
#include <iomanip>

using namespace std;

namespace flow_cutter{

	// Lightweight aggregate describing a flow network through six
	// ID-function objects (the template parameters). Each functor maps an
	// arc or node ID to the corresponding attribute; the struct itself owns
	// the functors but performs no graph computation.
	//
	// NOTE(review): arc/node ID ranges are presumed to come from
	// tail.preimage_count()/tail.image_count() — confirm against the
	// *_id_func headers.
	template<class Tail, class Head, class BackArc, class ArcWeight, class Capacity, class OutArc>
	struct Graph{
		Graph(
			Tail tail, Head head, BackArc back_arc, ArcWeight arc_weight, Capacity capacity, OutArc out_arc
		):
			tail(std::move(tail)), head(std::move(head)), back_arc(std::move(back_arc)),
			arc_weight(std::move(arc_weight)), capacity(std::move(capacity)), out_arc(std::move(out_arc)){}

		Tail tail;           // arc -> tail node
		Head head;           // arc -> head node
		BackArc back_arc;    // arc -> its reverse arc
		//NodeWeight node_weight;
		ArcWeight arc_weight; // arc -> weight
		Capacity capacity;    // arc -> capacity
		OutArc out_arc;       // node -> iterable range of outgoing arcs

		// Number of nodes: the image of the tail mapping.
		int node_count()const{
			return tail.image_count();
		}

		// Number of arcs: the preimage (domain) of the tail mapping.
		int arc_count()const{
			return tail.preimage_count();
		}
	};

	//! Each threads needs its own TemporaryData object.
struct TemporaryData{ TemporaryData(){} explicit TemporaryData(int node_count): node_space(node_count){} ArrayIDFunc<int>node_space; }; template<class Tail, class Head, class BackArc, class ArcWeight, class Capacity, class OutArc> Graph<Tail, Head, BackArc, ArcWeight, Capacity, OutArc> make_graph( Tail tail, Head head, BackArc back_arc, ArcWeight arc_weight, Capacity capacity, OutArc out_arc ){ return {std::move(tail), std::move(head), std::move(back_arc), std::move(arc_weight), std::move(capacity), std::move(out_arc)}; } template<class Tail, class Head, class BackArc, class OutArc> Graph< Tail, Head, BackArc, ConstIntIDFunc<1>, ConstIntIDFunc<1>, OutArc > make_graph( const Tail&tail, const Head&head, const BackArc&back_arc, const OutArc&out_arc ){ return { std::move(tail), std::move(head), std::move(back_arc), ConstIntIDFunc<1>(tail.preimage_count()), ConstIntIDFunc<1>(tail.preimage_count()), std::move(out_arc) }; } class PseudoDepthFirstSearch{ public: template<class Graph, class WasNodeSeen, class SeeNode, class ShouldFollowArc, class OnNewArc> void operator()( const Graph&graph, TemporaryData&tmp, int source_node, const WasNodeSeen&was_node_seen, const SeeNode&see_node, const ShouldFollowArc&should_follow_arc, const OnNewArc&on_new_arc )const{ int stack_end = 1; auto&stack = tmp.node_space; stack[0] = source_node; while(stack_end != 0){ int x = stack[--stack_end]; for(auto xy : graph.out_arc(x)){ on_new_arc(xy); int y = graph.head(xy); if(!was_node_seen(y)){ if(should_follow_arc(xy)){ if(!see_node(y)) return; stack[stack_end++] = y; } } } } } }; class BreadthFirstSearch{ public: template<class Graph, class WasNodeSeen, class SeeNode, class ShouldFollowArc, class OnNewArc> void operator()( const Graph&graph, TemporaryData&tmp, int source_node, const WasNodeSeen&was_node_seen, const SeeNode&see_node, const ShouldFollowArc&should_follow_arc, const OnNewArc&on_new_arc )const{ int queue_begin = 0, queue_end = 1; auto&queue = tmp.node_space; queue[0] = source_node; 
while(queue_begin != queue_end){ int x = queue[queue_begin++]; for(auto xy : graph.out_arc(x)){ on_new_arc(xy); int y = graph.head(xy); if(!was_node_seen(y)){ if(should_follow_arc(xy)){ if(!see_node(y)) return; queue[queue_end++] = y; } } } } } }; struct UnitFlow{ UnitFlow(){} explicit UnitFlow(int preimage_count):flow(preimage_count){} void clear(){ flow.fill(1); } int preimage_count()const{ return flow.preimage_count(); } template<class Graph> void increase(const Graph&graph, int a){ auto f = flow(a); assert((f == 0 || f == 1) && "Flow is already maximum; can not be increased"); assert(flow(graph.back_arc(a)) == 2-f && "Back arc has invalid flow"); ++f; flow.set(a, f); flow.set(graph.back_arc(a), 2-f); } template<class Graph> void decrease(const Graph&graph, int a){ auto f = flow(a); assert((f == 1 || f == 2) && "Flow is already minimum; can not be decreased"); assert(flow(graph.back_arc(a)) == 2-f && "Back arc has invalid flow"); --f; flow.set(a, f); flow.set(graph.back_arc(a), 2-f); } int operator()(int a)const{ return static_cast<int>(flow(a))-1; } void swap(UnitFlow&o){ flow.swap(o.flow); } TinyIntIDFunc<2>flow; }; class BasicNodeSet{ public: template<class Graph> explicit BasicNodeSet(const Graph&graph): node_count_inside_(0), inside_flag(graph.node_count()), extra_node(-1){} void clear(){ node_count_inside_ = 0; inside_flag.fill(false); } bool can_grow()const{ return extra_node != -1; } template<class Graph, class SearchAlgorithm, class OnNewNode, class ShouldFollowArc, class OnNewArc> void grow( const Graph&graph, TemporaryData&tmp, const SearchAlgorithm&search_algo, const OnNewNode&on_new_node, // on_new_node(x) is called for every node x. 
If it returns false then the search is stopped, if it returns true it continues const ShouldFollowArc&should_follow_arc, // is called for a subset of arcs and must say whether the arc sould be followed const OnNewArc&on_new_arc // on_new_arc(xy) is called for ever arc xy with x in the set ){ assert(can_grow()); auto see_node = [&](int x){ assert(!inside_flag(x)); inside_flag.set(x, true); ++this->node_count_inside_; return on_new_node(x); }; auto was_node_seen = [&](int x){ return inside_flag(x); }; search_algo(graph, tmp, extra_node, was_node_seen, see_node, should_follow_arc, on_new_arc); extra_node = -1; } template<class Graph> void set_extra_node(const Graph&graph, int x){ assert(!inside_flag(x)); assert(extra_node == -1); inside_flag.set(x, true); ++node_count_inside_; extra_node = x; } bool is_inside(int x) const { return inside_flag(x); } int node_count_inside() const { return node_count_inside_; } int max_node_count_inside() const { return inside_flag.preimage_count(); } private: int node_count_inside_; BitIDFunc inside_flag; int extra_node; }; class ReachableNodeSet; class AssimilatedNodeSet{ friend class ReachableNodeSet; public: template<class Graph> explicit AssimilatedNodeSet(const Graph&graph): node_set(graph){} void clear(){ node_set.clear(); front.clear(); } template<class Graph> void set_extra_node(const Graph&graph, int x){ node_set.set_extra_node(graph, x); } bool can_grow()const{ return node_set.can_grow(); } template<class Graph, class SearchAlgorithm, class OnNewNode, class ShouldFollowArc, class OnNewArc, class HasFlow> void grow( const Graph&graph, TemporaryData&tmp, const SearchAlgorithm&search_algo, const OnNewNode&on_new_node, // on_new_node(x) is called for every node x. 
If it returns false then the search is stopped, if it returns true it continues const ShouldFollowArc&should_follow_arc, // is called for a subset of arcs and must say whether the arc sould be followed const OnNewArc&on_new_arc, // on_new_arc(xy) is called for ever arc xy with x in the set const HasFlow&has_flow ){ auto my_on_new_arc = [&](int xy){ if(has_flow(xy)) front.push_back(xy); on_new_arc(xy); }; node_set.grow(graph, tmp, search_algo, on_new_node, should_follow_arc, my_on_new_arc); } bool is_inside(int x) const { return node_set.is_inside(x); } int node_count_inside() const { return node_set.node_count_inside(); } int max_node_count_inside() const { return node_set.max_node_count_inside(); } template<class Graph> void shrink_cut_front(const Graph&graph){ front.erase( std::remove_if( front.begin(), front.end(), [&](int xy){ return node_set.is_inside(graph.head(xy)); } ), front.end() ); } const std::vector<int>&get_cut_front() const { return front; } private: BasicNodeSet node_set; std::vector<int>front; }; class ReachableNodeSet{ public: template<class Graph> explicit ReachableNodeSet(const Graph&graph): node_set(graph), predecessor(graph.node_count()){} void reset(const AssimilatedNodeSet&other){ node_set = other.node_set; } void clear(){ node_set.clear(); } template<class Graph> void set_extra_node(const Graph&graph, int x){ node_set.set_extra_node(graph, x); } bool can_grow()const{ return node_set.can_grow(); } template<class Graph, class SearchAlgorithm, class OnNewNode, class ShouldFollowArc, class OnNewArc> void grow( const Graph&graph, TemporaryData&tmp, const SearchAlgorithm&search_algo, const OnNewNode&on_new_node, // on_new_node(x) is called for every node x. 
If it returns false then the search is stopped, if it returns true it continues const ShouldFollowArc&should_follow_arc, // is called for a subset of arcs and must say whether the arc sould be followed const OnNewArc&on_new_arc // on_new_arc(xy) is called for ever arc xy with x in the set ){ auto my_should_follow_arc = [&](int xy){ predecessor[graph.head(xy)] = xy; return should_follow_arc(xy); }; node_set.grow(graph, tmp, search_algo, on_new_node, my_should_follow_arc, on_new_arc); } bool is_inside(int x) const { return node_set.is_inside(x); } int node_count_inside() const { return node_set.node_count_inside(); } int max_node_count_inside() const { return node_set.max_node_count_inside(); } template<class Graph, class IsSource, class OnNewArc> void forall_arcs_in_path_to(const Graph&graph, const IsSource&is_source, int target, const OnNewArc&on_new_arc){ int x = target; while(!is_source(x)){ on_new_arc(predecessor[x]); x = graph.tail(predecessor[x]); } } private: BasicNodeSet node_set; ArrayIDFunc<int>predecessor; }; struct SourceTargetPair{ int source, target; }; struct CutterStateDump{ BitIDFunc source_assimilated, target_assimilated, source_reachable, target_reachable, flow; }; class BasicCutter{ public: template<class Graph> explicit BasicCutter(const Graph&graph): assimilated{AssimilatedNodeSet(graph), AssimilatedNodeSet(graph)}, reachable{ReachableNodeSet(graph), ReachableNodeSet(graph)}, flow(graph.arc_count()), cut_available(false) {} template<class Graph, class SearchAlgorithm> void init(const Graph&graph, TemporaryData&tmp, const SearchAlgorithm&search_algo, SourceTargetPair p){ assimilated[source_side].clear(); reachable[source_side].clear(); assimilated[target_side].clear(); reachable[target_side].clear(); flow.clear(); assimilated[source_side].set_extra_node(graph, p.source); reachable[source_side].set_extra_node(graph, p.source); assimilated[target_side].set_extra_node(graph, p.target); reachable[target_side].set_extra_node(graph, p.target); 
grow_reachable_sets(graph, tmp, search_algo, source_side); grow_assimilated_sets(graph, tmp, search_algo); cut_available = true; check_invariants(graph); } CutterStateDump dump_state()const{ return { id_func( assimilated[source_side].max_node_count_inside(), [&](int x){ return assimilated[source_side].is_inside(x); } ), id_func( assimilated[target_side].max_node_count_inside(), [&](int x){ return assimilated[target_side].is_inside(x); } ), id_func( assimilated[source_side].max_node_count_inside(), [&](int x){ return reachable[source_side].is_inside(x); } ), id_func( assimilated[target_side].max_node_count_inside(), [&](int x){ return reachable[target_side].is_inside(x); } ), id_func( flow.preimage_count(), [&](int xy){ return flow(xy) != 0; } ) }; } //! Returns true if a new cut was found. Returns false if no cut was found. False implies that no cut //! will be found in the future. Repeatly calling this function after it returned false does not do //! anything. template<class Graph, class SearchAlgorithm, class ScorePierceNode> bool advance(const Graph&graph, TemporaryData&tmp, const SearchAlgorithm&search_algo, const ScorePierceNode&score_pierce_node){ assert(cut_available); check_invariants(graph); int side = get_current_cut_side(); if(assimilated[side].node_count_inside() >= graph.node_count()/2){ cut_available = false; return false; } int pierce_node = select_pierce_node(graph, side, score_pierce_node); if(pierce_node == -1){ cut_available = false; return false; } assert(!assimilated[1-side].is_inside(pierce_node)); assimilated[side].set_extra_node(graph, pierce_node); reachable[side].set_extra_node(graph, pierce_node); grow_reachable_sets(graph, tmp, search_algo, side); grow_assimilated_sets(graph, tmp, search_algo); check_invariants(graph); cut_available = true; return true; } bool is_cut_available()const{ return cut_available; } template<class Graph, class ScorePierceNode> bool does_next_advance_increase_cut(const Graph&graph, const 
ScorePierceNode&score_pierce_node){ int side = get_current_cut_side(); if(assimilated[side].node_count_inside() >= graph.node_count()/2){ return true; } int pierce_node = select_pierce_node(graph, side, score_pierce_node); if(pierce_node == -1) return true; else if(reachable[1-side].is_inside(pierce_node)) return true; else return false; } bool is_on_smaller_side(int x)const{ return assimilated[get_current_cut_side()].is_inside(x); } static const int source_side = 0; static const int target_side = 1; int get_current_cut_side()const{ if( reachable[source_side].node_count_inside() == assimilated[source_side].node_count_inside() && ( reachable[target_side].node_count_inside() != assimilated[target_side].node_count_inside() || assimilated[source_side].node_count_inside() <= assimilated[target_side].node_count_inside() ) ) return source_side; else return target_side; } int get_current_smaller_cut_side_size()const{ return assimilated[get_current_cut_side()].node_count_inside(); } const std::vector<int>&get_current_cut()const{ return assimilated[get_current_cut_side()].get_cut_front(); } int get_assimilated_node_count()const{ return assimilated[source_side].node_count_inside() + assimilated[target_side].node_count_inside(); } private: template<class Graph, class ScorePierceNode> int select_pierce_node(const Graph&graph, int side, const ScorePierceNode&score_pierce_node){ int pierce_node = -1; int max_score = std::numeric_limits<int>::min(); for(auto xy : assimilated[side].get_cut_front()){ int y = graph.head(xy); if(!assimilated[1-side].is_inside(y)){ int score = score_pierce_node(y, side, reachable[1-side].is_inside(y), graph.arc_weight(xy)); if(score > max_score){ max_score = score; pierce_node = y; } } } return pierce_node; } template<class Graph> bool is_saturated(const Graph&graph, int direction, int xy){ if(direction == target_side) xy = graph.back_arc(xy); return graph.capacity(xy) == flow(xy); } template<class Graph, class SearchAlgorithm> void 
grow_reachable_sets(const Graph&graph, TemporaryData&tmp, const SearchAlgorithm&search_algo, int pierced_side){ int my_source_side = pierced_side; int my_target_side = 1-pierced_side; assert(reachable[pierced_side].can_grow()); auto is_forward_saturated = [&,this](int xy){ return this->is_saturated(graph, my_source_side, xy); }; auto is_backward_saturated = [&,this](int xy){ return this->is_saturated(graph, my_target_side, xy); }; auto is_source = [&](int x){ return assimilated[my_source_side].is_inside(x); }; auto is_target = [&](int x){ return assimilated[my_target_side].is_inside(x); }; auto increase_flow = [&](int xy){ if(pierced_side == source_side) flow.increase(graph, xy); else flow.decrease(graph, xy); }; bool was_flow_augmented = false; int target_hit; do{ target_hit = -1; auto on_new_node = [&](int x){ if(is_target(x)){ target_hit = x; return false; } else return true; }; auto should_follow_arc = [&](int xy){ return !is_forward_saturated(xy); }; auto on_new_arc = [](int xy){}; reachable[my_source_side].grow(graph, tmp, search_algo, on_new_node, should_follow_arc, on_new_arc); if(target_hit != -1){ check_flow_conservation(graph); reachable[my_source_side].forall_arcs_in_path_to(graph, is_source, target_hit, increase_flow); check_flow_conservation(graph); reachable[my_source_side].reset(assimilated[my_source_side]); was_flow_augmented = true; check_flow_conservation(graph); } }while(target_hit != -1); if(was_flow_augmented){ reachable[my_target_side].reset(assimilated[my_target_side]); auto on_new_node = [&](int x){return true;}; auto should_follow_arc = [&](int xy){ return !is_backward_saturated(xy); }; auto on_new_arc = [](int xy){}; reachable[my_target_side].grow(graph, tmp, search_algo, on_new_node, should_follow_arc, on_new_arc); } } template<class Graph, class SearchAlgorithm> void grow_assimilated_sets(const Graph&graph, TemporaryData&tmp, const SearchAlgorithm&search_algo){ auto is_forward_saturated = [&,this](int xy){ return 
this->is_saturated(graph, source_side, xy); }; auto is_backward_saturated = [&,this](int xy){ return this->is_saturated(graph, target_side, xy); }; if(reachable[source_side].node_count_inside() <= reachable[target_side].node_count_inside()){ auto on_new_node = [&](int x){return true;}; auto should_follow_arc = [&](int xy){ return !is_forward_saturated(xy); }; auto on_new_arc = [](int xy){}; auto has_flow = [&](int xy){ return flow(xy) != 0; }; assimilated[source_side].grow(graph, tmp, search_algo, on_new_node, should_follow_arc, on_new_arc, has_flow); assimilated[source_side].shrink_cut_front(graph); }else{ auto on_new_node = [&](int x){return true;}; auto should_follow_arc = [&](int xy){ return !is_backward_saturated(xy); }; auto on_new_arc = [](int xy){}; auto has_flow = [&](int xy){ return flow(xy) != 0; }; assimilated[target_side].grow(graph, tmp, search_algo, on_new_node, should_follow_arc, on_new_arc, has_flow); assimilated[target_side].shrink_cut_front(graph); } } template<class Graph> void check_flow_conservation(const Graph&graph){ #ifndef NDEBUG for(int x=0; x<graph.node_count(); ++x) if(!assimilated[source_side].is_inside(x) && !assimilated[target_side].is_inside(x)){ int flow_surplus = 0; for(auto xy : graph.out_arc(x)) flow_surplus += flow(xy); assert(flow_surplus == 0 && "Flow must be conserved outside of the assimilated sides"); } #endif } template<class Graph> void check_invariants(const Graph&graph){ #ifndef NDEBUG for(int side = 0; side < 2; ++side) assert(assimilated[side].node_count_inside() > 0 && "Each side must contain at least one node"); for(int x=0; x<graph.node_count(); ++x) assert((!assimilated[source_side].is_inside(x) || !assimilated[target_side].is_inside(x)) && "a node can not be assimilated by both sides"); for(int side = 0; side < 2; ++side) for(int x=0; x<graph.node_count(); ++x) if(assimilated[side].is_inside(x)) assert(reachable[side].is_inside(x) && "assimilated must be a subset of reachable"); check_flow_conservation(graph); 
int smaller_reachable_side; if(reachable[source_side].node_count_inside() <= reachable[target_side].node_count_inside()) smaller_reachable_side = source_side; else smaller_reachable_side = target_side; assert(reachable[smaller_reachable_side].node_count_inside() == assimilated[smaller_reachable_side].node_count_inside()); for(int x=0; x<graph.node_count(); ++x) assert(reachable[smaller_reachable_side].is_inside(x) == assimilated[smaller_reachable_side].is_inside(x)); assert(!reachable[source_side].can_grow()); assert(!reachable[target_side].can_grow()); assert(!assimilated[smaller_reachable_side].can_grow()); #endif } AssimilatedNodeSet assimilated[2]; ReachableNodeSet reachable[2]; UnitFlow flow; bool cut_available; }; enum class DistanceType{ no_distance, hop_distance, weighted_distance }; class DistanceAwareCutter{ private: template<class Graph> static void compute_hop_distance_from(const Graph&graph, TemporaryData&tmp, int source, ArrayIDFunc<int>&dist){ dist.fill(std::numeric_limits<int>::max()); dist[source] = 0; auto was_node_seen = [&](int x){return false;}; auto see_node = [](int x){ return true; }; auto should_follow_arc = [&](int xy){ if(dist(graph.tail(xy)) < dist(graph.head(xy)) - 1){ dist[graph.head(xy)] = dist(graph.tail(xy))+1; return true; }else{ return false; } }; auto on_new_arc = [&](int xy){}; BreadthFirstSearch()(graph, tmp, source, was_node_seen, see_node, should_follow_arc, on_new_arc); } template<class Graph> static void compute_weighted_distance_from(const Graph&graph, TemporaryData&tmp, int source, ArrayIDFunc<int>&dist){ Dijkstra<BitIDFunc>dij(graph.node_count()); dij.clear(); dij.add_source_node(source); while(!dij.is_finished()) dij.settle_next(graph.out_arc, graph.head, graph.arc_weight, [](int,bool,int){}); dist = dij.move_distance_array(); } public: template<class Graph> DistanceAwareCutter(const Graph&graph): cutter(graph), node_dist{ArrayIDFunc<int>{graph.node_count()}, ArrayIDFunc<int>{graph.node_count()}}{} template<class Graph, 
class SearchAlgorithm> void init(const Graph&graph, TemporaryData&tmp, const SearchAlgorithm&search_algo, DistanceType dist_type, SourceTargetPair p, int random_seed){ cutter.init(graph, tmp, search_algo, p); rng.seed(random_seed); switch(dist_type){ case DistanceType::hop_distance: compute_hop_distance_from(graph, tmp, p.source, node_dist[source_side]); compute_hop_distance_from(graph, tmp, p.target, node_dist[target_side]); break; case DistanceType::weighted_distance: compute_weighted_distance_from(graph, tmp, p.source, node_dist[source_side]); compute_weighted_distance_from(graph, tmp, p.target, node_dist[target_side]); break; case DistanceType::no_distance: break; default: assert(false); break; } } CutterStateDump dump_state()const{ return cutter.dump_state(); } template<class Graph, class SearchAlgorithm, class ScorePierceNode> bool advance(const Graph&graph, TemporaryData&tmp, const SearchAlgorithm&search_algo, const ScorePierceNode&score_pierce_node){ auto my_score_pierce_node = [&](int x, int side, bool causes_augmenting_path, int arc_weight){ return score_pierce_node(x, side, causes_augmenting_path, arc_weight, node_dist[side](x), node_dist[1-side](x)); }; return cutter.advance(graph, tmp, search_algo, my_score_pierce_node); } bool is_cut_available()const{ return cutter.is_cut_available(); } template<class Graph, class ScorePierceNode> bool does_next_advance_increase_cut(const Graph&graph, const ScorePierceNode&score_pierce_node){ auto my_score_pierce_node = [&](int x, int side, bool causes_augmenting_path, int arc_weight){ return score_pierce_node(x, side, causes_augmenting_path, arc_weight, node_dist[side](x), node_dist[1-side](x)); }; return cutter.does_next_advance_increase_cut(graph, my_score_pierce_node); } static const int source_side = BasicCutter::source_side; static const int target_side = BasicCutter::target_side; int get_current_cut_side()const{ return cutter.get_current_cut_side(); } int get_current_smaller_cut_side_size()const{ return 
cutter.get_current_smaller_cut_side_size(); } const std::vector<int>&get_current_cut()const{ return cutter.get_current_cut(); } int get_assimilated_node_count()const{ return cutter.get_assimilated_node_count(); } bool is_on_smaller_side(int x)const{ return cutter.is_on_smaller_side(x); } bool is_empty()const{ return node_dist[0].preimage_count() == 0; } private: BasicCutter cutter; ArrayIDFunc<int>node_dist[2]; mt19937 rng; }; class MultiCutter{ public: MultiCutter(){} template<class Graph, class SearchAlgorithm, class ScorePierceNode> void init( const Graph&graph, std::vector<TemporaryData>&tmp, const SearchAlgorithm&search_algo, const ScorePierceNode&score_pierce_node, DistanceType dist_type, const std::vector<SourceTargetPair>&p, int random_seed, bool should_skip_non_maximum_sides = true ){ while(cutter_list.size() > p.size()) cutter_list.pop_back(); // can not use resize because that requires default constructor... while(cutter_list.size() < p.size()) cutter_list.emplace_back(graph); #pragma omp parallel num_threads(tmp.size()) { int thread_id = omp_get_thread_num(); #pragma omp for schedule(dynamic) for(int i=0; i<(int)p.size(); ++i){ auto&x = cutter_list[i]; auto my_score_pierce_node = [&](int x, int side, bool causes_augmenting_path, int arc_weight, int source_dist, int target_dist){ return score_pierce_node(x, side, causes_augmenting_path, arc_weight, source_dist, target_dist, i); }; x.init(graph, tmp[thread_id], search_algo, dist_type, p[i], random_seed+1+i); if(should_skip_non_maximum_sides) while(!x.does_next_advance_increase_cut(graph, my_score_pierce_node)) x.advance(graph, tmp[thread_id], search_algo, my_score_pierce_node); } } int best_cutter_id = -1; int best_cut_size = std::numeric_limits<int>::max(); int best_cutter_weight = 0; for(int i=0; i<(int)p.size(); ++i){ auto&x = cutter_list[i]; if( (int)x.get_current_cut().size() < best_cut_size || ( (int)x.get_current_cut().size() == best_cut_size && x.get_current_smaller_cut_side_size() > 
best_cutter_weight ) ){ best_cutter_id = i; best_cut_size = x.get_current_cut().size(); best_cutter_weight = x.get_current_smaller_cut_side_size(); } } current_cutter_id = best_cutter_id; current_smaller_side_size = cutter_list[current_cutter_id].get_current_smaller_cut_side_size(); } CutterStateDump dump_state()const{ if(cutter_list.size() != 1) throw std::runtime_error("Can only dump the cutter state if a single instance is run"); return cutter_list[0].dump_state(); } template<class Graph, class SearchAlgorithm, class ScorePierceNode> bool advance(const Graph&graph, std::vector<TemporaryData>&tmp, const SearchAlgorithm&search_algo, const ScorePierceNode&score_pierce_node, bool should_skip_non_maximum_sides = true){ if(graph.node_count() /2 == get_current_smaller_cut_side_size()) return false; int current_cut_size = cutter_list[current_cutter_id].get_current_cut().size(); for(;;){ #pragma omp parallel num_threads(tmp.size()) { int thread_id = omp_get_thread_num(); #pragma omp for schedule(dynamic) for(int i=0; i<(int)cutter_list.size(); ++i){ auto x = std::move(cutter_list[i]); auto my_score_pierce_node = [&](int x, int side, bool causes_augmenting_path, int arc_weight, int source_dist, int target_dist){ return score_pierce_node(x, side, causes_augmenting_path, arc_weight, source_dist, target_dist, i); }; if(x.is_cut_available()){ if((int)x.get_current_cut().size() == current_cut_size){ assert(x.does_next_advance_increase_cut(graph, my_score_pierce_node)); if(x.advance(graph, tmp[thread_id], search_algo, my_score_pierce_node)){ assert((int)x.get_current_cut().size() > current_cut_size); while(!x.does_next_advance_increase_cut(graph, my_score_pierce_node)){ if(!x.advance(graph, tmp[thread_id], search_algo, my_score_pierce_node)) break; if(!should_skip_non_maximum_sides) break; } } } } cutter_list[i] = std::move(x); } } int next_cut_size = std::numeric_limits<int>::max(); for(auto&x:cutter_list) if(x.is_cut_available()) min_to(next_cut_size, 
// --- tail of MultiCutter::advance() (the function opens before this chunk) ---
// Among the sub-cutters whose current cut already has the next-smallest cut
// size, pick the one whose smaller side is heaviest; only report progress
// (return true) when that strictly improves on the best seen so far.
(int)x.get_current_cut().size());
if(next_cut_size == std::numeric_limits<int>::max())
	return false; // no sub-cutter can produce a further cut
int best_cutter_weight = 0;
int best_cutter_id = -1;
for(int i=0; i<(int)cutter_list.size(); ++i){
	if(cutter_list[i].is_cut_available()){
		if( (int)cutter_list[i].get_current_cut().size() == next_cut_size && cutter_list[i].get_current_smaller_cut_side_size() > best_cutter_weight ){
			best_cutter_id = i;
			best_cutter_weight = cutter_list[i].get_current_smaller_cut_side_size();
		}
	}
}
assert(best_cutter_id != -1);
current_cut_size = next_cut_size;
// Skip cuts that do not grow the smaller side; keep advancing instead.
if(best_cutter_weight <= current_smaller_side_size)
	continue;
current_cutter_id = best_cutter_id;
current_smaller_side_size = cutter_list[current_cutter_id].get_current_smaller_cut_side_size();
return true;
}
}

// Size (node count) of the smaller side of the currently selected cut.
int get_current_smaller_cut_side_size()const{
	return current_smaller_side_size;
}

// True if node x lies on the smaller side of the currently selected cut.
bool is_on_smaller_side(int x)const{
	return cutter_list[current_cutter_id].is_on_smaller_side(x);
}

// Arcs of the currently selected cut (delegates to the chosen sub-cutter).
const std::vector<int>&get_current_cut()const{
	return cutter_list[current_cutter_id].get_current_cut();
}

// Index of the sub-cutter that produced the currently selected cut.
int get_current_cutter_id()const{
	return current_cutter_id;
}
private:
	std::vector<DistanceAwareCutter>cutter_list;
	int current_smaller_side_size;
	int current_cutter_id;
};

// Scores candidate pierce nodes; larger score = pierced first.  The concrete
// rating is chosen by Config::pierce_rating; ties/randomness come from a
// seeded hash so runs are reproducible for a fixed random_seed.
struct PierceNodeScore{
	// Mersenne prime 2^31-1 keeps the hash inside non-negative int range.
	static constexpr unsigned hash_modulo = ((1u<<31u)-1u);
	unsigned hash_factor, hash_offset;

	PierceNodeScore(Config config):
		config(config){
		std::mt19937 gen;
		gen.seed(config.random_seed);
		gen(); // burn one draw before taking the hash parameters
		hash_factor = gen() % hash_modulo;
		hash_offset = gen() % hash_modulo;
	}

	Config config;

	// Rate node x as a pierce candidate.  side distinguishes source/target
	// side; distances are in the metric selected by the pierce rating
	// (cutter_id is currently unused by every rating below).
	int operator()(int x, int side, bool causes_augmenting_path, int arc_weight, int source_dist, int target_dist, int cutter_id)const{
		// Deterministic pseudo-random value; the low bit encodes the side so
		// the same node gets different draws on the two sides.
		auto random_number = [&]{
			if(side == BasicCutter::source_side)
				return (hash_factor * (unsigned)(x<<1) + hash_offset) % hash_modulo;
			else
				return (hash_factor * ((unsigned)(x<<1)+1) + hash_offset) % hash_modulo;
		};
		int score;
		switch(config.pierce_rating){
		case Config::PierceRating::max_target_minus_source_hop_dist:
		case Config::PierceRating::max_target_minus_source_weight_dist:
			score = target_dist - source_dist;
			break;
		case Config::PierceRating::max_target_hop_dist:
		case Config::PierceRating::max_target_weight_dist:
			score = target_dist;
			break;
		case Config::PierceRating::min_source_hop_dist:
		case Config::PierceRating::min_source_weight_dist:
			score = -source_dist;
			break;
		case Config::PierceRating::oldest:
			score = 0;
			break;
		case Config::PierceRating::random:
			score = random_number();
			break;
		case Config::PierceRating::max_arc_weight:
			score = arc_weight;
			break;
		case Config::PierceRating::min_arc_weight:
			score = -arc_weight;
			break;
		case Config::PierceRating::circular_hop:
		case Config::PierceRating::circular_weight:
			// NOTE: returns directly, bypassing the augmenting-path penalty below.
			if(side == BasicCutter::source_side)
				return -source_dist;
			else
				return target_dist;
			break;
		default:
			assert(false);
			score = 0;
		}
		switch(config.avoid_augmenting_path){
		case Config::AvoidAugmentingPath::avoid_and_pick_best:
			// Large penalty demotes, but preserves the relative rating order.
			if(causes_augmenting_path) score -= 1000000000;
			break;
		case Config::AvoidAugmentingPath::do_not_avoid:
			break;
		case Config::AvoidAugmentingPath::avoid_and_pick_oldest:
			if(causes_augmenting_path) score = -1000000000;
			break;
		case Config::AvoidAugmentingPath::avoid_and_pick_random:
			if(causes_augmenting_path) score = random_number() - 1000000000;
			break;
		default:
			assert(false);
			score = 0;
		}
		return score;
	}
};

// Convenience facade: owns the per-thread temporary buffers and a
// MultiCutter, and translates the Config enums into the template arguments
// (search algorithm, distance type, pierce score) the cutter expects.
template<class Graph>
class SimpleCutter{
public:
	SimpleCutter(const Graph&graph, Config config):
		graph(graph),
		tmp(config.thread_count, TemporaryData(graph.node_count())),
		config(config){
	}

	// Start a new run for the given source/target pairs.
	void init(const std::vector<SourceTargetPair>&p, int random_seed){
		// Derive which node-distance metric the chosen pierce rating needs.
		DistanceType dist_type;
		if( config.pierce_rating == Config::PierceRating::min_source_hop_dist || config.pierce_rating == Config::PierceRating::max_target_hop_dist || config.pierce_rating == Config::PierceRating::max_target_minus_source_hop_dist || config.pierce_rating == Config::PierceRating::circular_hop )
			dist_type = DistanceType::hop_distance;
		else if( config.pierce_rating == Config::PierceRating::min_source_weight_dist || config.pierce_rating == Config::PierceRating::max_target_weight_dist || config.pierce_rating == Config::PierceRating::max_target_minus_source_weight_dist || config.pierce_rating == Config::PierceRating::circular_weight )
			dist_type = DistanceType::weighted_distance;
		else
			dist_type = DistanceType::no_distance;
		switch(config.graph_search_algorithm){
		case Config::GraphSearchAlgorithm::pseudo_depth_first_search:
			cutter.init(graph, tmp, PseudoDepthFirstSearch(), PierceNodeScore(config), dist_type, p, random_seed, config.skip_non_maximum_sides == Config::SkipNonMaximumSides::skip);
			break;
		case Config::GraphSearchAlgorithm::breadth_first_search:
			cutter.init(graph, tmp, BreadthFirstSearch(), PierceNodeScore(config), dist_type, p, random_seed, config.skip_non_maximum_sides == Config::SkipNonMaximumSides::skip);
			break;
		case Config::GraphSearchAlgorithm::depth_first_search:
			throw std::runtime_error("depth first search is not yet implemented");
		default:
			assert(false);
		}
	}

	// Advance to the next (better) cut; false when exhausted.
	bool advance(){
		switch(config.graph_search_algorithm){
		case Config::GraphSearchAlgorithm::pseudo_depth_first_search:
			return cutter.advance(graph, tmp, PseudoDepthFirstSearch(), PierceNodeScore(config), config.skip_non_maximum_sides == Config::SkipNonMaximumSides::skip);
		case Config::GraphSearchAlgorithm::breadth_first_search:
			return cutter.advance(graph, tmp, BreadthFirstSearch(), PierceNodeScore(config), config.skip_non_maximum_sides == Config::SkipNonMaximumSides::skip);
		case Config::GraphSearchAlgorithm::depth_first_search:
			throw std::runtime_error("depth first search is not yet implemented");
		default:
			assert(false);
			return false;
		}
	}

	CutterStateDump dump_state()const{
		return cutter.dump_state();
	}

	int get_current_smaller_cut_side_size()const{
		return cutter.get_current_smaller_cut_side_size();
	}

	bool is_on_smaller_side(int x)const{
		return cutter.is_on_smaller_side(x);
	}

	const std::vector<int>&get_current_cut()const{
		return cutter.get_current_cut();
	}

	int get_current_cutter_id()const{
		return cutter.get_current_cutter_id();
	}
private:
	const Graph&graph;
	std::vector<TemporaryData>tmp;
	MultiCutter cutter;
	Config config;
};

// Factory deducing the graph type for SimpleCutter.
template<class Graph>
SimpleCutter<Graph> make_simple_cutter(const Graph&graph, Config config){
	return SimpleCutter<Graph>(graph, config);
}

// Weighted-distance pierce ratings require non-negative arc weights.
inline bool requires_non_negative_weights(Config config){
	return config.pierce_rating == Config::PierceRating::min_source_weight_dist || config.pierce_rating == Config::PierceRating::max_target_weight_dist || config.pierce_rating == Config::PierceRating::max_target_minus_source_weight_dist;
}

// Draw cutter_count uniformly random (source, target) pairs with
// source != target, reproducibly for a fixed seed.
std::vector<SourceTargetPair>select_random_source_target_pairs(int node_count, int cutter_count, int seed){
	std::vector<SourceTargetPair>p(cutter_count);
	std::mt19937 rng(seed);
	std::uniform_int_distribution<int> dist(0, node_count-1);
	for(auto&x:p){
		do{
			x.source = dist(rng);
			x.target = dist(rng);
		}while(x.source == x.target);
	}
	return p;
}

}
#endif
/* ==== Fig_6.8_6.9_mandelbrotWrong.c ==== */
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> # define NPOINTS 1000 # define MAXITER 1000 void testpoint(void); struct d_complex { double r; double i; }; struct d_complex c; int numoutside = 0; int main() { int i, j; double area, error, eps = 1.0e-5; // Loop over grid of points in the complex plane which contains // the Mandelbrot set, test each point to see whether it is // inside or outside the set #pragma omp parallel for private(c,eps) for (i = 0; i < NPOINTS; i++) { for (j = 0; j < NPOINTS; j++) { c.r = -2.0 + 2.5 * (double)(i) / (double)(NPOINTS) + eps; c.i = 1.125 * (double)(j) / (double)(NPOINTS) + eps; testpoint(); } } // Calculate area of set and error estimate and output the results area = 2.0 * 2.5 * 1.125 * (double)(NPOINTS * NPOINTS - numoutside) / (double)(NPOINTS * NPOINTS); error = area / (double)NPOINTS; printf("Area of Mandlebrot set = %12.8f +/- %12.8f\n",area,error); printf("Correct answer should be around 1.506\n"); } void testpoint(void) { // Does the iteration z=z*z+c, until |z| > 2 when point is known to // be outside set. If loop count reaches MAXITER, point is considered // to be inside the set. struct d_complex z; int iter; double temp; z = c; for (iter = 0; iter < MAXITER; iter++) { temp = (z.r * z.r) - (z.i * z.i) + c.r; z.i = z.r * z.i * 2 + c.i; z.r = temp; if ((z.r * z.r + z.i * z.i) > 4.0) { numoutside++; break; } } }
/* ==== magick/image-view.c ==== */
/*
  MagickCore Image View Methods

  Software Design: Cristy, March 2003

  Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization
  dedicated to making software imaging solutions freely available.

  You may not use this file except in compliance with the License.  You may
  obtain a copy of the License at

    https://imagemagick.org/script/license.php

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
*/

/*
  Include declarations.
*/
#include "magick/studio.h"
#include "magick/MagickCore.h"
#include "magick/exception-private.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"

/*
  Typedef declarations.

  An ImageView bundles an image, a rectangular region of interest, a cache
  view onto the image's pixels, and a private exception sink.
*/
struct _ImageView
{
  char
    *description;        /* user-visible label, used by progress reporting */

  RectangleInfo
    extent;              /* region the iterators traverse */

  Image
    *image;              /* image the view was created for (not owned) */

  CacheView
    *view;               /* pixel cache view onto image */

  size_t
    number_threads;      /* thread-team size hint */

  ExceptionInfo
    *exception;          /* per-view exception sink (owned) */

  MagickBooleanType
    debug;

  size_t
    signature;           /* MagickCoreSignature while the view is live */
};

/*
  CloneImageView() makes a copy of the specified image view.

  The format of the CloneImageView method is:

      ImageView *CloneImageView(const ImageView *image_view)

  A description of each parameter follows:

    o image_view: the image view.
*/
MagickExport ImageView *CloneImageView(const ImageView *image_view)
{
  ImageView
    *clone_view;

  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  clone_view=(ImageView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (ImageView *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(clone_view,0,sizeof(*clone_view));
  clone_view->description=ConstantString(image_view->description);
  clone_view->extent=image_view->extent;
  /*
    BUG FIX: the image pointer was never copied, leaving clone_view->image
    NULL (from memset); the iterators dereference view->image (e.g. for the
    progress monitor) and would crash on a cloned view.
  */
  clone_view->image=image_view->image;
  clone_view->view=CloneCacheView(image_view->view);
  clone_view->number_threads=image_view->number_threads;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,image_view->exception);
  clone_view->debug=image_view->debug;
  clone_view->signature=MagickCoreSignature;
  return(clone_view);
}

/*
  DestroyImageView() deallocates memory associated with a image view.
  The image itself is not destroyed; the view does not own it.

  The format of the DestroyImageView method is:

      ImageView *DestroyImageView(ImageView *image_view)

  A description of each parameter follows:

    o image_view: the image view.
*/
MagickExport ImageView *DestroyImageView(ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  if (image_view->description != (char *) NULL)
    image_view->description=DestroyString(image_view->description);
  image_view->view=DestroyCacheView(image_view->view);
  image_view->exception=DestroyExceptionInfo(image_view->exception);
  image_view->signature=(~MagickCoreSignature);  /* poison against reuse */
  image_view=(ImageView *) RelinquishMagickMemory(image_view);
  return(image_view);
}

/*
  DuplexTransferImageViewIterator() iterates over three image views in
  parallel and calls your transfer method for each scanline of the view.  The
  source and duplex pixel extent is not confined to the image canvas-- that is
  you can include negative offsets or widths or heights that exceed the image
  dimension.  However, the destination image view is confined to the image
  canvas-- that is no negative offsets or widths or heights that exceed the
  image dimension are permitted.

  The callback signature is:

      MagickBooleanType DuplexTransferImageViewMethod(const ImageView *source,
        const ImageView *duplex,ImageView *destination,const ssize_t y,
        const int thread_id,void *context)

  Use this pragma if the view is not single threaded:

      #pragma omp critical

  to define a section of code in your callback transfer method that must be
  executed by a single thread at a time.

  The format of the DuplexTransferImageViewIterator method is:

      MagickBooleanType DuplexTransferImageViewIterator(ImageView *source,
        ImageView *duplex,ImageView *destination,
        DuplexTransferImageViewMethod transfer,void *context)

  A description of each parameter follows:

    o source: the source image view.
% % o duplex: the duplex image view. % % o destination: the destination image view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ MagickExport MagickBooleanType DuplexTransferImageViewIterator( ImageView *source,ImageView *duplex,ImageView *destination, DuplexTransferImageViewMethod transfer,void *context) { ExceptionInfo *exception; Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (ImageView *) NULL); assert(source->signature == MagickCoreSignature); if (transfer == (DuplexTransferImageViewMethod) NULL) return(MagickFalse); source_image=source->image; destination_image=destination->image; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=(size_t) (source->extent.height-source->extent.y); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const PixelPacket *magick_restrict duplex_pixels, *magick_restrict pixels; register PixelPacket *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y, duplex->extent.width,1,duplex->exception); if (duplex_pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1,exception); if 
(destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } if (transfer(source,duplex,destination,y,id,context) == MagickFalse) status=MagickFalse; sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(source_image,source->description,progress, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w A u t h e n t i c I n d e x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewAuthenticIndexes() returns the image view authentic indexes. % % The format of the GetImageViewAuthenticPixels method is: % % IndexPacket *GetImageViewAuthenticIndexes(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport IndexPacket *GetImageViewAuthenticIndexes( const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(GetCacheViewAuthenticIndexQueue(image_view->view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewAuthenticPixels() returns the image view authentic pixels. 
  The format of the GetImageViewAuthenticPixels method is:

      PixelPacket *GetImageViewAuthenticPixels(const ImageView *image_view)

  A description of each parameter follows:

    o image_view: the image view.
*/
MagickExport PixelPacket *GetImageViewAuthenticPixels(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  return(GetCacheViewAuthenticPixelQueue(image_view->view));
}

/*
  GetImageViewException() returns the severity, reason, and description of any
  error that occurs when utilizing a image view.  The caller owns (and must
  free) the returned string.

  The format of the GetImageViewException method is:

      char *GetImageViewException(const PixelImage *image_view,
        ExceptionType *severity)

  A description of each parameter follows:

    o image_view: the pixel image_view.

    o severity: the severity of the error is returned here.
*/
MagickExport char *GetImageViewException(const ImageView *image_view,
  ExceptionType *severity)
{
  char
    *description;

  assert(image_view != (const ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  assert(severity != (ExceptionType *) NULL);
  *severity=image_view->exception->severity;
  /* 2*MaxTextExtent: room for reason plus parenthesized description. */
  description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *description='\0';
  if (image_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      image_view->exception->severity,image_view->exception->reason),
      MaxTextExtent);
  if (image_view->exception->description != (char *) NULL)
    {
      /* Append " (localized description)" after the reason. */
      (void) ConcatenateMagickString(description," (",MaxTextExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        image_view->exception->severity,image_view->exception->description),
        MaxTextExtent);
      (void) ConcatenateMagickString(description,")",MaxTextExtent);
    }
  return(description);
}

/*
  GetImageViewExtent() returns the image view extent.

  The format of the GetImageViewExtent method is:

      RectangleInfo GetImageViewExtent(const ImageView *image_view)

  A description of each parameter follows:

    o image_view: the image view.
*/
MagickExport RectangleInfo GetImageViewExtent(const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  return(image_view->extent);
}

/*
  GetImageViewImage() returns the image associated with the image view.

  The format of the GetImageViewImage method is:

      MagickCore *GetImageViewImage(const ImageView *image_view)

  A description of each parameter follows:

    o image_view: the image view.
*/
MagickExport Image *GetImageViewImage(const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  return(image_view->image);
}

/*
  GetImageViewIterator() iterates over the image view in parallel and calls
  your get method for each scanline of the view.  The pixel extent is
  not confined to the image canvas-- that is you can include negative offsets
  or widths or heights that exceed the image dimension.  Any updates to
  the pixels in your callback are ignored.

  The callback signature is:

      MagickBooleanType GetImageViewMethod(const ImageView *source,
        const ssize_t y,const int thread_id,void *context)

  Use this pragma if the view is not single threaded:

      #pragma omp critical

  to define a section of code in your callback get method that must be
  executed by a single thread at a time.
  The format of the GetImageViewIterator method is:

      MagickBooleanType GetImageViewIterator(ImageView *source,
        GetImageViewMethod get,void *context)

  A description of each parameter follows:

    o source: the source image view.

    o get: the get callback method.

    o context: the user defined context.
*/
MagickExport MagickBooleanType GetImageViewIterator(ImageView *source,
  GetImageViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (get == (GetImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const PixelPacket
      *pixels;

    if (status == MagickFalse)
      continue;  /* another scanline already failed */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
  GetImageViewVirtualIndexes() returns the image view virtual indexes.

  The format of the GetImageViewVirtualIndexes method is:

      const IndexPacket *GetImageViewVirtualIndexes(
        const ImageView *image_view)

  A description of each parameter follows:

    o image_view: the image view.
*/
MagickExport const IndexPacket *GetImageViewVirtualIndexes(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  return(GetCacheViewVirtualIndexQueue(image_view->view));
}

/*
  GetImageViewVirtualPixels() returns the image view virtual pixels.

  The format of the GetImageViewVirtualPixels method is:

      const PixelPacket *GetImageViewVirtualPixels(const ImageView *image_view)

  A description of each parameter follows:

    o image_view: the image view.
*/
MagickExport const PixelPacket *GetImageViewVirtualPixels(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  return(GetCacheViewVirtualPixelQueue(image_view->view));
}

/*
  IsImageView() returns MagickTrue if the parameter is verified as a image
  view object.

  The format of the IsImageView method is:

      MagickBooleanType IsImageView(const ImageView *image_view)

  A description of each parameter follows:

    o image_view: the image view.
*/
MagickExport MagickBooleanType IsImageView(const ImageView *image_view)
{
  if (image_view == (const ImageView *) NULL)
    return(MagickFalse);
  if (image_view->signature != MagickCoreSignature)
    return(MagickFalse);
  return(MagickTrue);
}

/*
  NewImageView() returns a image view required for all other methods in the
  Image View API.

  The format of the NewImageView method is:

      ImageView *NewImageView(MagickCore *wand)

  A description of each parameter follows:

    o wand: the wand.
*/
MagickExport ImageView *NewImageView(Image *image)
{
  ImageView
    *image_view;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  image_view=(ImageView *) AcquireMagickMemory(sizeof(*image_view));
  if (image_view == (ImageView *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(image_view,0,sizeof(*image_view));
  image_view->description=ConstantString("ImageView");
  image_view->image=image;
  image_view->exception=AcquireExceptionInfo();
  image_view->view=AcquireVirtualCacheView(image_view->image,
    image_view->exception);
  /* Default extent: the whole canvas. */
  image_view->extent.width=image->columns;
  image_view->extent.height=image->rows;
  image_view->extent.x=0;
  image_view->extent.y=0;
  image_view->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  image_view->debug=IsEventLogging();
  image_view->signature=MagickCoreSignature;
  return(image_view);
}

/*
  NewImageViewRegion() returns a image view required for all other methods
  in the Image View API.
% % The format of the NewImageViewRegion method is: % % ImageView *NewImageViewRegion(MagickCore *wand,const ssize_t x, % const ssize_t y,const size_t width,const size_t height) % % A description of each parameter follows: % % o wand: the magick wand. % % o x,y,columns,rows: These values define the perimeter of a extent of % pixel_wands view. % */ MagickExport ImageView *NewImageViewRegion(Image *image,const ssize_t x, const ssize_t y,const size_t width,const size_t height) { ImageView *image_view; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); image_view=(ImageView *) AcquireMagickMemory(sizeof(*image_view)); if (image_view == (ImageView *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(image_view,0,sizeof(*image_view)); image_view->description=ConstantString("ImageView"); image_view->exception=AcquireExceptionInfo(); image_view->view=AcquireVirtualCacheView(image_view->image, image_view->exception); image_view->image=image; image_view->extent.width=width; image_view->extent.height=height; image_view->extent.x=x; image_view->extent.y=y; image_view->number_threads=(size_t) GetMagickResourceLimit(ThreadResource); image_view->debug=IsEventLogging(); image_view->signature=MagickCoreSignature; return(image_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i e w D e s c r i p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageViewDescription() associates a description with an image view. % % The format of the SetImageViewDescription method is: % % void SetImageViewDescription(ImageView *image_view, % const char *description) % % A description of each parameter follows: % % o image_view: the image view. % % o description: the image view description. 
% */ MagickExport void SetImageViewDescription(ImageView *image_view, const char *description) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); image_view->description=ConstantString(description); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageViewIterator() iterates over the image view in parallel and calls % your set method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension. The pixels are initiallly % undefined and any settings you make in the callback method are automagically % synced back to your image. % % The callback signature is: % % MagickBooleanType SetImageViewMethod(ImageView *destination, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback set method that must be % executed by a single thread at a time. % % The format of the SetImageViewIterator method is: % % MagickBooleanType SetImageViewIterator(ImageView *destination, % SetImageViewMethod set,void *context) % % A description of each parameter follows: % % o destination: the image view. % % o set: the set callback method. % % o context: the user defined context. 
*/
MagickExport MagickBooleanType SetImageViewIterator(ImageView *destination,
  SetImageViewMethod set,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(destination != (ImageView *) NULL);
  assert(destination->signature == MagickCoreSignature);
  if (set == (SetImageViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->image;
  /* Destination must be writable pixel storage. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (destination->extent.height-destination->extent.y);
#endif
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(destination_image,destination_image,height,1)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register PixelPacket
      *magick_restrict pixels;

    if (status == MagickFalse)
      continue;  /* another scanline already failed */
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(destination_image,destination->description,
          progress,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
  SetImageViewThreads() sets the number of threads in a thread team.
  The value is clamped to the ThreadResource limit.

  The format of the SetImageViewDescription method is:

      void SetImageViewThreads(ImageView *image_view,
        const size_t number_threads)

  A description of each parameter follows:

    o image_view: the image view.

    o number_threads: the number of threads in a thread team.
*/
MagickExport void SetImageViewThreads(ImageView *image_view,
  const size_t number_threads)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  image_view->number_threads=number_threads;
  /* Clamp to the configured thread resource limit. */
  if (number_threads > (size_t) GetMagickResourceLimit(ThreadResource))
    image_view->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
}

/*
  TransferImageViewIterator() iterates over two image views in parallel and
  calls your transfer method for each scanline of the view.  The source pixel
  extent is not confined to the image canvas-- that is you can include
  negative offsets or widths or heights that exceed the image dimension.
  However, the destination image view is confined to the image canvas-- that
  is no negative offsets or widths or heights that exceed the image dimension
  are permitted.
% % The callback signature is: % % MagickBooleanType TransferImageViewMethod(const ImageView *source, % ImageView *destination,const ssize_t y,const int thread_id, % void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the TransferImageViewIterator method is: % % MagickBooleanType TransferImageViewIterator(ImageView *source, % ImageView *destination,TransferImageViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source image view. % % o destination: the destination image view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ MagickExport MagickBooleanType TransferImageViewIterator(ImageView *source, ImageView *destination,TransferImageViewMethod transfer,void *context) { ExceptionInfo *exception; Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (ImageView *) NULL); assert(source->signature == MagickCoreSignature); if (transfer == (TransferImageViewMethod) NULL) return(MagickFalse); source_image=source->image; destination_image=destination->image; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=(size_t) (source->extent.height-source->extent.y); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const PixelPacket *magick_restrict pixels; register PixelPacket *magick_restrict destination_pixels; if 
(status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } if (transfer(source,destination,y,id,context) == MagickFalse) status=MagickFalse; sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(source_image,source->description,progress, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U p d a t e I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UpdateImageViewIterator() iterates over the image view in parallel and calls % your update method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension are permitted. Updates to pixels % in your callback are automagically synced back to the image. % % The callback signature is: % % MagickBooleanType UpdateImageViewMethod(ImageView *source, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback update method that must be % executed by a single thread at a time. 
%
%  The format of the UpdateImageViewIterator method is:
%
%      MagickBooleanType UpdateImageViewIterator(ImageView *source,
%        UpdateImageViewMethod update,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source image view.
%
%    o update: the update callback method.
%
%    o context: the user defined context.
%
*/
MagickExport MagickBooleanType UpdateImageViewIterator(ImageView *source,
  UpdateImageViewMethod update,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (update == (UpdateImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  /* Authentic (writable) pixels require DirectClass storage. */
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* Row count of the view; used by the thread-count heuristic below. */
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register PixelPacket
      *magick_restrict pixels;

    /* A prior row failed; skip remaining rows cheaply. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    /* The callback edits 'pixels' in place; sync writes them back. */
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* 'progress' is shared across threads; the atomic covers the ++. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
cpd.c
/******************************************************************************
 * INCLUDES
 *****************************************************************************/
#include "base.h"
#include "cpd.h"
#include "matrix.h"
#include "mttkrp.h"
#include "timer.h"
#include "thd_info.h"
#include "util.h"
#include <math.h>


/******************************************************************************
 * API FUNCTIONS
 *****************************************************************************/

/**
* @brief Public entry point: run CPD-ALS on 'tensors' and fill 'factored'
*        with the rank-'nfactors' Kruskal model (factors, lambda, fit).
*        Ownership of the factor-matrix buffers and lambda transfers to
*        'factored'; release them with splatt_free_kruskal().
*/
int splatt_cpd_als(
    splatt_csf const * const tensors,
    splatt_idx_t const nfactors,
    double const * const options,
    splatt_kruskal * factored)
{
  matrix_t * mats[MAX_NMODES+1];

  idx_t nmodes = tensors->nmodes;

  /* NOTE(review): only .rank is initialized here; this is the serial entry
   * point, so downstream code should read nothing else -- confirm for MPI
   * builds. */
  rank_info rinfo;
  rinfo.rank = 0;

  /* allocate factor matrices (randomly initialized); the extra slot
   * mats[MAX_NMODES] is scratch sized for the largest mode */
  idx_t maxdim = tensors->dims[argmax_elem(tensors->dims, nmodes)];
  for(idx_t m=0; m < nmodes; ++m) {
    mats[m] = (matrix_t *) mat_rand(tensors[0].dims[m], nfactors);
  }
  mats[MAX_NMODES] = mat_alloc(maxdim, nfactors);

  val_t * lambda = (val_t *) splatt_malloc(nfactors * sizeof(val_t));

  /* do the factorization! */
  factored->fit = cpd_als_iterate(tensors, mats, lambda, nfactors, &rinfo,
      options);

  /* store output -- the raw 'vals' buffers are handed to 'factored' */
  factored->rank = nfactors;
  factored->nmodes = nmodes;
  factored->lambda = lambda;
  for(idx_t m=0; m < nmodes; ++m) {
    factored->dims[m] = tensors->dims[m];
    factored->factors[m] = mats[m]->vals;
  }

  /* clean up */
  mat_free(mats[MAX_NMODES]);
  for(idx_t m=0; m < nmodes; ++m) {
    free(mats[m]); /* just the matrix_t ptr, data is safely in factored */
  }

  return SPLATT_SUCCESS;
}


/**
* @brief Free the buffers that splatt_cpd_als() stored in 'factored'
*        (lambda and each factor-matrix value array).
*/
void splatt_free_kruskal(
    splatt_kruskal * factored)
{
  free(factored->lambda);
  for(idx_t m=0; m < factored->nmodes; ++m) {
    free(factored->factors[m]);
  }
}



/******************************************************************************
 * PRIVATE FUNCTIONS
 *****************************************************************************/

/**
* @brief Resets serial and MPI timers that were activated during some CPD
*        pre-processing.
*
* @param rinfo MPI rank information.
*/
static void p_reset_cpd_timers(
    rank_info const * const rinfo)
{
  timer_reset(&timers[TIMER_ATA]);
#ifdef SPLATT_USE_MPI
  timer_reset(&timers[TIMER_MPI]);
  timer_reset(&timers[TIMER_MPI_IDLE]);
  timer_reset(&timers[TIMER_MPI_COMM]);
  timer_reset(&timers[TIMER_MPI_ATA]);
  timer_reset(&timers[TIMER_MPI_REDUCE]);
  timer_reset(&timers[TIMER_MPI_NORM]);
  timer_reset(&timers[TIMER_MPI_UPDATE]);
  timer_reset(&timers[TIMER_MPI_FIT]);
  /* keep ranks in lock-step so subsequent timings are comparable */
  MPI_Barrier(rinfo->comm_3d);
#endif
}


/**
* @brief Find the Frobenius norm squared of a Kruskal tensor. This equivalent
*        to via computing <X,X>, the inner product of X with itself. We find
*        this via \lambda^T (AtA * BtB * ...) \lambda, where * is the Hadamard
*        product.
*
* @param nmodes The number of modes in the tensor.
* @param lambda The vector of column norms.
* @param aTa An array of Gram Matrices (AtA, BtB, ...).
*
* @return The Frobenius norm of X, squared.
*/
static val_t p_kruskal_norm(
    idx_t const nmodes,
    val_t const * const restrict lambda,
    matrix_t ** aTa)
{
  idx_t const rank = aTa[0]->J;
  val_t * const restrict av = aTa[MAX_NMODES]->vals;

  val_t norm_mats = 0;

  /* use aTa[MAX_NMODES] as scratch space -- only the upper triangle
   * (j >= i) is touched, since the Gram matrices are symmetric */
  for(idx_t i=0; i < rank; ++i) {
    for(idx_t j=i; j < rank; ++j) {
      av[j + (i*rank)] = 1.;
    }
  }

  /* aTa[MAX_NMODES] = hada(aTa): elementwise product of all Gram matrices */
  for(idx_t m=0; m < nmodes; ++m) {
    val_t const * const restrict atavals = aTa[m]->vals;
    for(idx_t i=0; i < rank; ++i) {
      for(idx_t j=i; j < rank; ++j) {
        av[j + (i*rank)] *= atavals[j + (i*rank)];
      }
    }
  }

  /* now compute lambda^T * aTa[MAX_NMODES] * lambda; off-diagonal terms are
   * doubled to account for the symmetric lower triangle */
  for(idx_t i=0; i < rank; ++i) {
    norm_mats += av[i+(i*rank)] * lambda[i] * lambda[i];
    for(idx_t j=i+1; j < rank; ++j) {
      norm_mats += av[j+(i*rank)] * lambda[i] * lambda[j] * 2;
    }
  }

  return fabs(norm_mats);
}


/**
* @brief Compute the inner product of a Kruskal tensor and an unfactored
*        tensor. Assumes that 'm1' contains the MTTKRP result along the last
mode of the two input tensors. This naturally follows the end of a
*        CPD iteration.
*
* @param nmodes The number of modes in the input tensors.
* @param rinfo MPI rank information.
* @param thds OpenMP thread data structures.
* @param lambda The vector of column norms.
* @param mats The Kruskal-tensor matrices.
* @param m1 The result of doing MTTKRP along the last mode.
*
* @return The inner product of the two tensors, computed via:
*         1^T hadamard(mats[nmodes-1], m1) \lambda.
*/
static val_t p_tt_kruskal_inner(
    idx_t const nmodes,
    rank_info * const rinfo,
    thd_info * const thds,
    val_t const * const restrict lambda,
    matrix_t ** mats,
    matrix_t const * const m1)
{
  idx_t const rank = mats[0]->J;
  idx_t const lastm = nmodes - 1;
  idx_t const dim = m1->I;

  val_t const * const m0 = mats[lastm]->vals;
  val_t const * const mv = m1->vals;

  val_t myinner = 0;
  #pragma omp parallel reduction(+:myinner)
  {
    int const tid = splatt_omp_get_thread_num();
    /* per-thread accumulator (rank-length), taken from thread scratch */
    val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];

    for(idx_t r=0; r < rank; ++r) {
      accumF[r] = 0.;
    }

    /* columnwise dot products of mats[lastm] and m1, rows split by omp */
    #pragma omp for
    for(idx_t i=0; i < dim; ++i) {
      for(idx_t r=0; r < rank; ++r) {
        accumF[r] += m0[r+(i*rank)] * mv[r+(i*rank)];
      }
    }

    /* accumulate everything into 'myinner' (combined by the reduction) */
    for(idx_t r=0; r < rank; ++r) {
      myinner += accumF[r] * lambda[r];
    }
  }
  val_t inner = 0.;
#ifdef SPLATT_USE_MPI
  timer_start(&timers[TIMER_MPI_FIT]);
  MPI_Allreduce(&myinner, &inner, 1, SPLATT_MPI_VAL, MPI_SUM, rinfo->comm_3d);
  timer_stop(&timers[TIMER_MPI_FIT]);
#else
  inner = myinner;
#endif

  return inner;
}


/**
* @brief Compute the fit of a Kruskal tensor, Z, to an input tensor, X. This
*        is computed via 1 - [sqrt(<X,X> + <Z,Z> - 2<X,Z>) / sqrt(<X,X>)].
*
* @param nmodes The number of modes in the input tensors.
* @param rinfo MPI rank information.
* @param thds OpenMP thread data structures.
* @param ttnormsq The norm (squared) of the original input tensor, <X,X>.
* @param lambda The vector of column norms.
* @param mats The Kruskal-tensor matrices.
* @param m1 The result of doing MTTKRP along the last mode.
* @param aTa An array of matrices (length MAX_NMODES)containing BtB, CtC, etc.
*
* @return The inner product of the two tensors, computed via:
*         \lambda^T hadamard(mats[nmodes-1], m1) \lambda.
*/
static val_t p_calc_fit(
    idx_t const nmodes,
    rank_info * const rinfo,
    thd_info * const thds,
    val_t const ttnormsq,
    val_t const * const restrict lambda,
    matrix_t ** mats,
    matrix_t const * const m1,
    matrix_t ** aTa)
{
  timer_start(&timers[TIMER_FIT]);

  /* First get norm of new model: lambda^T * (hada aTa) * lambda. */
  val_t const norm_mats = p_kruskal_norm(nmodes, lambda, aTa);

  /* Compute inner product of tensor with new model */
  val_t const inner = p_tt_kruskal_inner(nmodes, rinfo, thds, lambda,
      mats,m1);

  /*
   * We actually want sqrt(<X,X> + <Y,Y> - 2<X,Y>), but if the fit is perfect
   * just make it 0.
   * NOTE(review): a (small, negative) residual from floating-point round-off
   * is left un-rooted here and flows into the return -- presumably harmless
   * near a perfect fit, but confirm the intent.
   */
  val_t residual = ttnormsq + norm_mats - (2 * inner);
  if(residual > 0.) {
    residual = sqrt(residual);
  }
  timer_stop(&timers[TIMER_FIT]);

  return 1 - (residual / sqrt(ttnormsq));
}



/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/

/**
* @brief Run the CPD-ALS main loop: for each iteration and each mode,
*        MTTKRP into m1, solve the normal equations, normalize columns into
*        'lambda', and refresh that mode's Gram matrix; stop after
*        SPLATT_OPTION_NITER iterations or when the fit improvement drops
*        below SPLATT_OPTION_TOLERANCE.
*
* @return The fit of the final model.
*/
double cpd_als_iterate(
  splatt_csf const * const tensors,
  matrix_t ** mats,
  val_t * const lambda,
  idx_t const nfactors,
  rank_info * const rinfo,
  double const * const opts)
{
  idx_t const nmodes = tensors[0].nmodes;
  idx_t const nthreads = (idx_t) opts[SPLATT_OPTION_NTHREADS];

  /* Setup thread structures. + 64 bytes is to avoid false sharing.
   * TODO make this better */
  splatt_omp_set_num_threads(nthreads);
  thd_info * thds =  thd_init(nthreads, 3,
    (nmodes * nfactors * sizeof(val_t)) + 64,
    0,
    (nmodes * nfactors * sizeof(val_t)) + 64);

  /* m1 aliases the scratch matrix; MTTKRP results land here */
  matrix_t * m1 = mats[MAX_NMODES];

  /* Initialize first A^T * A mats. We redundantly do the first because it
   * makes communication easier. */
  matrix_t * aTa[MAX_NMODES+1];
  for(idx_t m=0; m < nmodes; ++m) {
    aTa[m] = mat_alloc(nfactors, nfactors);
    memset(aTa[m]->vals, 0, nfactors * nfactors * sizeof(val_t));
    mat_aTa(mats[m], aTa[m], rinfo, thds, nthreads);
  }
  /* used as buffer space */
  aTa[MAX_NMODES] = mat_alloc(nfactors, nfactors);

  /* mttkrp workspace */
  splatt_mttkrp_ws * mttkrp_ws = splatt_mttkrp_alloc_ws(tensors,nfactors,
      opts);

  /* Compute input tensor norm */
  double oldfit = 0;
  double fit = 0;
  val_t ttnormsq = csf_frobsq(tensors);

  /* setup timers */
  p_reset_cpd_timers(rinfo);
  sp_timer_t itertime;
  sp_timer_t modetime[MAX_NMODES];
  timer_start(&timers[TIMER_CPD]);

  idx_t const niters = (idx_t) opts[SPLATT_OPTION_NITER];
  for(idx_t it=0; it < niters; ++it) {
    timer_fstart(&itertime);
    for(idx_t m=0; m < nmodes; ++m) {
      timer_fstart(&modetime[m]);
      /* resize scratch to this mode's dimension */
      mats[MAX_NMODES]->I = tensors[0].dims[m];
      m1->I = mats[m]->I;

      /* M1 = X * (C o B) */
      timer_start(&timers[TIMER_MTTKRP]);
      mttkrp_csf(tensors, mats, m, thds, mttkrp_ws, opts);
      timer_stop(&timers[TIMER_MTTKRP]);

#if 0
      /* M2 = (CtC .* BtB .* ...)^-1 */
      calc_gram_inv(m, nmodes, aTa);
      /* A = M1 * M2 */
      memset(mats[m]->vals, 0, mats[m]->I * nfactors * sizeof(val_t));
      mat_matmul(m1, aTa[MAX_NMODES], mats[m]);
#else
      /* solve the normal equations in place instead of explicit inverse */
      par_memcpy(mats[m]->vals, m1->vals, m1->I * nfactors * sizeof(val_t));
      mat_solve_normals(m, nmodes, aTa, mats[m],
          opts[SPLATT_OPTION_REGULARIZE]);
#endif

      /* normalize columns and extract lambda (2-norm first iter, then max) */
      if(it == 0) {
        mat_normalize(mats[m], lambda, MAT_NORM_2, rinfo, thds, nthreads);
      } else {
        mat_normalize(mats[m], lambda, MAT_NORM_MAX, rinfo, thds,nthreads);
      }

      /* update A^T*A */
      mat_aTa(mats[m], aTa[m], rinfo, thds, nthreads);
      timer_stop(&modetime[m]);
    } /* foreach mode */

    fit = p_calc_fit(nmodes, rinfo, thds, ttnormsq, lambda, mats, m1, aTa);
    timer_stop(&itertime);

    if(rinfo->rank == 0 &&
        opts[SPLATT_OPTION_VERBOSITY] > SPLATT_VERBOSITY_NONE) {
      printf(" its = %3"SPLATT_PF_IDX" (%0.3fs) fit = %0.5f delta = %+0.4e\n",
          it+1, itertime.seconds, fit, fit - oldfit);
      if(opts[SPLATT_OPTION_VERBOSITY] > SPLATT_VERBOSITY_LOW) {
        for(idx_t m=0; m < nmodes; ++m) {
          printf(" mode = %1"SPLATT_PF_IDX" (%0.3fs)\n", m+1,
              modetime[m].seconds);
        }
      }
    }
    /* converged when fit is perfect or improvement is below tolerance */
    if(fit == 1. ||
        (it > 0 && fabs(fit - oldfit) < opts[SPLATT_OPTION_TOLERANCE])) {
      break;
    }
    oldfit = fit;
  }
  timer_stop(&timers[TIMER_CPD]);

  cpd_post_process(nfactors, nmodes, mats, lambda, thds, nthreads, rinfo);

  /* CLEAN UP */
  splatt_mttkrp_free_ws(mttkrp_ws);
  for(idx_t m=0; m < nmodes; ++m) {
    mat_free(aTa[m]);
  }
  mat_free(aTa[MAX_NMODES]);
  thd_free(thds, nthreads);

  return fit;
}


/**
* @brief Re-normalize each factor matrix with the 2-norm and fold the column
*        norms into 'lambda', leaving unit-norm columns and a final scaling
*        vector.
*/
void cpd_post_process(
  idx_t const nfactors,
  idx_t const nmodes,
  matrix_t ** mats,
  val_t * const lambda,
  thd_info * const thds,
  idx_t const nthreads,
  rank_info * const rinfo)
{
  val_t * tmp = splatt_malloc(nfactors * sizeof(*tmp));

  /* normalize each matrix and adjust lambda */
  for(idx_t m=0; m < nmodes; ++m) {
    mat_normalize(mats[m], tmp, MAT_NORM_2, rinfo, thds, nthreads);
    for(idx_t f=0; f < nfactors; ++f) {
      lambda[f] *= tmp[f];
    }
  }

  free(tmp);
}
dds.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/profile.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/module.h"
#include "magick/transform.h"

/*
  Definitions
*/
/* DDS_HEADER dwFlags bits: which header fields are valid */
#define DDSD_CAPS         0x00000001
#define DDSD_HEIGHT       0x00000002
#define DDSD_WIDTH        0x00000004
#define DDSD_PITCH        0x00000008
#define DDSD_PIXELFORMAT  0x00001000
#define DDSD_MIPMAPCOUNT  0x00020000
#define DDSD_LINEARSIZE   0x00080000
#define DDSD_DEPTH        0x00800000

/* DDS_PIXELFORMAT dwFlags bits: how pixel data is encoded */
#define DDPF_ALPHAPIXELS  0x00000001
#define DDPF_FOURCC       0x00000004
#define DDPF_RGB          0x00000040
#define DDPF_LUMINANCE    0x00020000

/* FourCC codes ('DXT1', 'DXT3', 'DXT5' as little-endian u32) */
#define FOURCC_DXT1       0x31545844
#define FOURCC_DXT3       0x33545844
#define FOURCC_DXT5       0x35545844

/* dwCaps bits */
#define DDSCAPS_COMPLEX   0x00000008
#define DDSCAPS_TEXTURE   0x00001000
#define DDSCAPS_MIPMAP    0x00400000

/* dwCaps2 bits: cube-map faces and volume textures */
#define DDSCAPS2_CUBEMAP  0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX  0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX  0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY  0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY  0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ  0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ  0x00008000
#define DDSCAPS2_VOLUME   0x00200000

#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif

/*
  Structure declarations.
*/ typedef struct _DDSPixelFormat { size_t flags, fourcc, rgb_bitcount, r_bitmask, g_bitmask, b_bitmask, alpha_bitmask; } DDSPixelFormat; typedef struct _DDSInfo { size_t flags, height, width, pitchOrLinearSize, depth, mipmapcount, ddscaps1, ddscaps2; DDSPixelFormat pixelformat; } DDSInfo; typedef struct _DDSColors { unsigned char r[4], g[4], b[4], a[4]; } DDSColors; typedef struct _DDSVector4 { float x, y, z, w; } DDSVector4; typedef struct _DDSVector3 { float x, y, z; } DDSVector3; typedef struct _DDSSourceBlock { unsigned char start, end, error; } DDSSourceBlock; typedef struct _DDSSingleColourLookup { DDSSourceBlock sources[2]; } DDSSingleColourLookup; typedef MagickBooleanType DDSDecoder(Image *, DDSInfo *, ExceptionInfo *); static const DDSSingleColourLookup DDSLookup_5_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 1 } } }, { { { 0, 0, 2 }, { 0, 1, 0 } } }, { { { 0, 0, 3 }, { 0, 1, 1 } } }, { { { 0, 0, 4 }, { 0, 2, 1 } } }, { { { 1, 0, 3 }, { 0, 2, 0 } } }, { { { 1, 0, 2 }, { 0, 2, 1 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 1, 2, 1 } } }, { { { 1, 0, 2 }, { 1, 2, 0 } } }, { { { 1, 0, 3 }, { 0, 4, 0 } } }, { { { 1, 0, 4 }, { 0, 5, 1 } } }, { { { 2, 0, 3 }, { 0, 5, 0 } } }, { { { 2, 0, 2 }, { 0, 5, 1 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 2, 3, 1 } } }, { { { 2, 0, 2 }, { 2, 3, 0 } } }, { { { 2, 0, 3 }, { 0, 7, 0 } } }, { { { 2, 0, 4 }, { 1, 6, 1 } } }, { { { 3, 0, 3 }, { 1, 6, 0 } } }, { { { 3, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 2 }, { 0, 10, 1 } } }, { { { 3, 0, 3 }, { 0, 10, 0 } } }, { { { 3, 0, 4 }, { 2, 7, 1 } } }, { { { 4, 0, 4 }, { 2, 7, 0 } } }, { { { 4, 0, 3 }, { 0, 11, 0 } } }, { { { 4, 0, 2 }, { 1, 10, 1 } } }, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 
4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 29, 0 } } }, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { { { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 24, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } 
}, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } } }, { { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { 
{ { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } }, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 }, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 }, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } }, { { { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { 
{ 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 }, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 }, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } }, { { { 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 
31, 0, 0 }, { 31, 31, 0 } } } }; static const DDSSingleColourLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 }, { 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 13, 0, 1 }, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 
0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } } }, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { { { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 8, 63, 0 } } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } 
}, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } }, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { { 37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 63, 0 } } }, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } 
}, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { { 51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 } } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, { { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 52, 0, 2 }, { 48, 62, 0 } } }, { { { 53, 0, 1 }, { 56, 47, 0 } } }, { { { 53, 0, 0 }, { 48, 63, 0 } } }, { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } 
}, { { { 54, 0, 1 }, { 58, 46, 0 } } }, { { { 54, 0, 0 }, { 50, 62, 0 } } }, { { { 54, 0, 1 }, { 50, 63, 0 } } }, { { { 54, 0, 2 }, { 51, 62, 0 } } }, { { { 55, 0, 1 }, { 59, 47, 0 } } }, { { { 55, 0, 0 }, { 51, 63, 0 } } }, { { { 55, 0, 1 }, { 52, 62, 0 } } }, { { { 55, 0, 2 }, { 52, 63, 0 } } }, { { { 56, 0, 1 }, { 61, 46, 0 } } }, { { { 56, 0, 0 }, { 53, 62, 0 } } }, { { { 56, 0, 1 }, { 53, 63, 0 } } }, { { { 56, 0, 2 }, { 54, 62, 0 } } }, { { { 57, 0, 1 }, { 62, 47, 0 } } }, { { { 57, 0, 0 }, { 54, 63, 0 } } }, { { { 57, 0, 1 }, { 55, 62, 0 } } }, { { { 57, 0, 2 }, { 55, 63, 0 } } }, { { { 58, 0, 1 }, { 56, 62, 1 } } }, { { { 58, 0, 0 }, { 56, 62, 0 } } }, { { { 58, 0, 1 }, { 56, 63, 0 } } }, { { { 58, 0, 2 }, { 57, 62, 0 } } }, { { { 59, 0, 1 }, { 57, 63, 1 } } }, { { { 59, 0, 0 }, { 57, 63, 0 } } }, { { { 59, 0, 1 }, { 58, 62, 0 } } }, { { { 59, 0, 2 }, { 58, 63, 0 } } }, { { { 60, 0, 1 }, { 59, 62, 1 } } }, { { { 60, 0, 0 }, { 59, 62, 0 } } }, { { { 60, 0, 1 }, { 59, 63, 0 } } }, { { { 60, 0, 2 }, { 60, 62, 0 } } }, { { { 61, 0, 1 }, { 60, 63, 1 } } }, { { { 61, 0, 0 }, { 60, 63, 0 } } }, { { { 61, 0, 1 }, { 61, 62, 0 } } }, { { { 61, 0, 2 }, { 61, 63, 0 } } }, { { { 62, 0, 1 }, { 62, 62, 1 } } }, { { { 62, 0, 0 }, { 62, 62, 0 } } }, { { { 62, 0, 1 }, { 62, 63, 0 } } }, { { { 62, 0, 2 }, { 63, 62, 0 } } }, { { { 63, 0, 1 }, { 63, 63, 1 } } }, { { { 63, 0, 0 }, { 63, 63, 0 } } } }; static const DDSSingleColourLookup* DDS_LOOKUP[] = { DDSLookup_5_4, DDSLookup_6_4, DDSLookup_5_4 }; /* Macros */ #define C565_r(x) (((x) & 0xF800) >> 11) #define C565_g(x) (((x) & 0x07E0) >> 5) #define C565_b(x) ((x) & 0x001F) #define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2)) #define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4)) #define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2)) #define DIV2(x) ((x) > 1 ? 
((x) >> 1) : 1) #define FixRange(min, max, steps) \ if (min > max) \ min = max; \ if ((ssize_t) max - min < steps) \ max = MagickMin(min + steps, 255); \ if ((ssize_t) max - min < steps) \ min = MagickMax(0, (ssize_t) max - steps) #define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z) #define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \ = value #define VectorInit3(vector, value) vector.x = vector.y = vector.z = value #define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \ g && mask.b_bitmask == b && mask.alpha_bitmask == a) /* Forward declarations */ static MagickBooleanType ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3, DDSVector4 *,DDSVector4 *,unsigned char *,size_t), ReadDDSInfo(Image *,DDSInfo *), ReadDXT1(Image *,DDSInfo *,ExceptionInfo *), ReadDXT3(Image *,DDSInfo *,ExceptionInfo *), ReadDXT5(Image *,DDSInfo *,ExceptionInfo *), ReadUncompressedRGB(Image *,DDSInfo *,ExceptionInfo *), ReadUncompressedRGBA(Image *,DDSInfo *,ExceptionInfo *), SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), WriteDDSImage(const ImageInfo *,Image *), WriteMipmaps(Image *,const size_t,const size_t,const size_t, const MagickBooleanType,const MagickBooleanType,ExceptionInfo *); static void RemapIndices(const ssize_t *,const unsigned char *,unsigned char *), WriteDDSInfo(Image *,const size_t,const size_t,const size_t), WriteFourCC(Image *,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), WriteIndices(Image *,const DDSVector3,const DDSVector3, unsigned char *), WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *), WriteUncompressed(Image *,ExceptionInfo *); static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = 
left.x + right.x; destination->y = left.y + right.y; destination->z = left.z + right.z; destination->w = left.w + right.w; } static inline void VectorClamp(DDSVector4 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = MagickMin(1.0f,MagickMax(0.0f,value->z)); value->w = MagickMin(1.0f,MagickMax(0.0f,value->w)); } static inline void VectorClamp3(DDSVector3 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = MagickMin(1.0f,MagickMax(0.0f,value->z)); } static inline void VectorCopy43(const DDSVector4 source, DDSVector3 *destination) { destination->x = source.x; destination->y = source.y; destination->z = source.z; } static inline void VectorCopy44(const DDSVector4 source, DDSVector4 *destination) { destination->x = source.x; destination->y = source.y; destination->z = source.z; destination->w = source.w; } static inline void VectorNegativeMultiplySubtract(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = c.x - (a.x * b.x); destination->y = c.y - (a.y * b.y); destination->z = c.z - (a.z * b.z); destination->w = c.w - (a.w * b.w); } static inline void VectorMultiply(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; destination->w = left.w * right.w; } static inline void VectorMultiply3(const DDSVector3 left, const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; } static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; destination->w = (a.w * b.w) + c.w; } static inline 
void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b, const DDSVector3 c, DDSVector3 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; } static inline void VectorReciprocal(const DDSVector4 value, DDSVector4 *destination) { destination->x = 1.0f / value.x; destination->y = 1.0f / value.y; destination->z = 1.0f / value.z; destination->w = 1.0f / value.w; } static inline void VectorSubtract(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; destination->w = left.w - right.w; } static inline void VectorSubtract3(const DDSVector3 left, const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; } static inline void VectorTruncate(DDSVector4 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z); value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w); } static inline void VectorTruncate3(DDSVector3 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? 
floor(value->z) : ceil(value->z);
}

/*
  CalculateColors(): expand the two packed 5:6:5 endpoint colors c0/c1 of a
  DXT color block into the four-entry palette *c.

  When ignoreAlpha is set, or c0 > c1, the block is in four-color mode and
  entries 2/3 are the 1/3 and 2/3 interpolants of the endpoints.  Otherwise
  (DXT1, c0 <= c1) the block is in three-color mode: entry 2 is the midpoint
  and entry 3 is transparent black (a[3] = 255; the a[] values feed the
  opacity channel in the DXT1 reader, where nonzero marks transparency).
*/
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  /* Default every palette entry to a == 0 (treated as opaque by callers). */
  c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;

  /* Endpoint 0, expanded from 5:6:5 to 8 bits per channel. */
  c->r[0] = (unsigned char) C565_red(c0);
  c->g[0] = (unsigned char) C565_green(c0);
  c->b[0] = (unsigned char) C565_blue(c0);

  /* Endpoint 1. */
  c->r[1] = (unsigned char) C565_red(c1);
  c->g[1] = (unsigned char) C565_green(c1);
  c->b[1] = (unsigned char) C565_blue(c1);

  if (ignoreAlpha != MagickFalse || c0 > c1)
    {
      /* Four-color mode: 2/3-1/3 and 1/3-2/3 blends of the endpoints. */
      c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
      c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
      c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);

      c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
      c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
      c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
    }
  else
    {
      /* Three-color mode: midpoint plus transparent black. */
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);

      c->r[3] = c->g[3] = c->b[3] = 0;
      c->a[3] = 255;
    }
}

/*
  CompressAlpha(): quantize the 16 alpha values of a block against the
  8-entry DXT5-style alpha code table built from min/max, writing the chosen
  3-bit code for each pixel into indices[] and returning the accumulated
  squared quantization error.

  With steps == 5 the table is {min, max, 4 interpolants, 0, 255}; with
  steps == 7 the interpolation loop overwrites slots 6/7 with interpolants,
  giving the 8-interpolated-value mode.  alphas[i] == -1 marks a pixel with
  no alpha sample; it is assigned code 0 and contributes no error.
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  unsigned char
    codes[8];

  register ssize_t
    i;

  size_t
    error,
    index,
    j,
    least,
    value;

  /* Endpoints, then the fixed 0/255 pair used by the 6-value mode. */
  codes[0] = (unsigned char) min;
  codes[1] = (unsigned char) max;
  codes[6] = 0;
  codes[7] = 255;

  /* Linear interpolants between min and max (overwrites 6/7 if steps==7). */
  for (i=1; i < (ssize_t) steps; i++)
    codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);

  error = 0;
  for (i=0; i<16; i++)
  {
    if (alphas[i] == -1)
      {
        indices[i] = 0;
        continue;
      }

    /* Pick the code with the smallest squared distance to this alpha.
       The unsigned subtraction may wrap, but squaring modulo 2^N still
       yields the true squared difference for byte-range operands. */
    value = alphas[i];
    least = SIZE_MAX;
    index = 0;
    for (j=0; j<8; j++)
    {
      size_t
        dist;

      dist = value - (size_t)codes[j];
      dist *= dist;

      if (dist < least)
        {
          least = dist;
          index = j;
        }
    }

    indices[i] = (unsigned char)index;
    error += least;
  }

  return error;
}

/*
  CompressClusterFit(): cluster-fit color compression (body continues below).
*/
static void CompressClusterFit(const size_t count, const DDSVector4 *points,
  const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric,
  DDSVector3 *start, DDSVector3 *end, unsigned char *indices)
{
DDSVector3 axis; DDSVector4 grid, gridrcp, half, onethird_onethird2, pointsWeights[16], two, twonineths, twothirds_twothirds2, xSumwSum; float bestError = 1e+37f; size_t bestIteration = 0, besti = 0, bestj = 0, bestk = 0, iterationIndex; ssize_t i; unsigned char *o, order[128], unordered[16]; VectorInit(half,0.5f); VectorInit(two,2.0f); VectorInit(onethird_onethird2,1.0f/3.0f); onethird_onethird2.w = 1.0f/9.0f; VectorInit(twothirds_twothirds2,2.0f/3.0f); twothirds_twothirds2.w = 4.0f/9.0f; VectorInit(twonineths,2.0f/9.0f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; grid.w = 0.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; gridrcp.w = 0.0f; xSumwSum.x = 0.0f; xSumwSum.y = 0.0f; xSumwSum.z = 0.0f; xSumwSum.w = 0.0f; ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0); for (iterationIndex = 0;;) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,1) \ num_threads(GetMagickResourceLimit(ThreadResource)) #endif for (i=0; i < (ssize_t) count; i++) { DDSVector4 part0, part1, part2; size_t ii, j, k, kmin; VectorInit(part0,0.0f); for(ii=0; ii < (size_t) i; ii++) VectorAdd(pointsWeights[ii],part0,&part0); VectorInit(part1,0.0f); for (j=(size_t) i;;) { if (j == 0) { VectorCopy44(pointsWeights[0],&part2); kmin = 1; } else { VectorInit(part2,0.0f); kmin = j; } for (k=kmin;;) { DDSVector4 a, alpha2_sum, alphax_sum, alphabeta_sum, b, beta2_sum, betax_sum, e1, e2, factor, part3; float error; VectorSubtract(xSumwSum,part2,&part3); VectorSubtract(part3,part1,&part3); VectorSubtract(part3,part0,&part3); VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum); VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum); VectorInit(alpha2_sum,alphax_sum.w); VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum); VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum); VectorInit(beta2_sum,betax_sum.w); VectorAdd(part1,part2,&alphabeta_sum); 
VectorInit(alphabeta_sum,alphabeta_sum.w); VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum); VectorMultiply(alpha2_sum,beta2_sum,&factor); VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor, &factor); VectorReciprocal(factor,&factor); VectorMultiply(alphax_sum,beta2_sum,&a); VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a); VectorMultiply(a,factor,&a); VectorMultiply(betax_sum,alpha2_sum,&b); VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b); VectorMultiply(b,factor,&b); VectorClamp(&a); VectorMultiplyAdd(grid,a,half,&a); VectorTruncate(&a); VectorMultiply(a,gridrcp,&a); VectorClamp(&b); VectorMultiplyAdd(grid,b,half,&b); VectorTruncate(&b); VectorMultiply(b,gridrcp,&b); VectorMultiply(b,b,&e1); VectorMultiply(e1,beta2_sum,&e1); VectorMultiply(a,a,&e2); VectorMultiplyAdd(e2,alpha2_sum,e1,&e1); VectorMultiply(a,b,&e2); VectorMultiply(e2,alphabeta_sum,&e2); VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2); VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2); VectorMultiplyAdd(two,e2,e1,&e2); VectorMultiply(e2,metric,&e2); error = e2.x + e2.y + e2.z; if (error < bestError) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (DDS_CompressClusterFit) #endif { if (error < bestError) { VectorCopy43(a,start); VectorCopy43(b,end); bestError = error; besti = i; bestj = j; bestk = k; bestIteration = iterationIndex; } } } if (k == count) break; VectorAdd(pointsWeights[k],part2,&part2); k++; } if (j == count) break; VectorAdd(pointsWeights[j],part1,&part1); j++; } } if (bestIteration != iterationIndex) break; iterationIndex++; if (iterationIndex == 8) break; VectorSubtract3(*end,*start,&axis); if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order, iterationIndex) == MagickFalse) break; } o = order + (16*bestIteration); for (i=0; i < (ssize_t) besti; i++) unordered[o[i]] = 0; for (i=besti; i < (ssize_t) bestj; i++) unordered[o[i]] = 2; for (i=bestj; i < (ssize_t) bestk; i++) unordered[o[i]] = 3; for 
(i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;

  /* Translate the best cluster partition back into per-pixel indices. */
  RemapIndices(map,unordered,indices);
}

/*
  CompressRangeFit(): fast "range fit" color compression.  Chooses the two
  input points with the minimum and maximum projection onto the principle
  axis as endpoints, snaps them to the 5:6:5 grid, derives the two 1/3-2/3
  interpolated palette entries, and assigns every point the closest of the
  four codes (distance weighted by metric).  Results go to *start, *end and
  indices[].
*/
static void CompressRangeFit(const size_t count, const DDSVector4 *points,
  const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric,
  DDSVector3 *start, DDSVector3 *end, unsigned char *indices)
{
  float
    d,
    bestDist,
    max,
    min,
    val;

  DDSVector3
    codes[4],
    grid,
    gridrcp,
    half,
    dist;

  register ssize_t
    i;

  size_t
    bestj,
    j;

  unsigned char
    closest[16];

  VectorInit3(half,0.5f);

  /* 5:6:5 quantization grid and its reciprocal. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;

  if (count > 0)
    {
      /* Endpoints = extremal projections onto the principle axis. */
      VectorCopy43(points[0],start);
      VectorCopy43(points[0],end);

      min = max = Dot(points[0],principle);
      for (i=1; i < (ssize_t) count; i++)
      {
        val = Dot(points[i],principle);
        if (val < min)
          {
            VectorCopy43(points[i],start);
            min = val;
          }
        else if (val > max)
          {
            VectorCopy43(points[i],end);
            max = val;
          }
      }
    }

  /* Snap both endpoints to the 5:6:5 grid (clamp, scale, round, rescale). */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);

  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);

  /* Four-entry palette: endpoints plus the two DXT interpolants. */
  codes[0] = *start;
  codes[1] = *end;
  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));

  /* Assign every point the metric-weighted nearest palette entry. */
  for (i=0; i < (ssize_t) count; i++)
  {
    bestDist = 1e+37f;
    bestj = 0;
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;

      d = Dot(dist,dist);
      if (d < bestDist)
        {
          bestDist = d;
          bestj = j;
        }
    }

    closest[i] = (unsigned char) bestj;
  }

  RemapIndices(map, closest, indices);
}

static void ComputeEndPoints(const
DDSSingleColourLookup *lookup[], const unsigned char *color, DDSVector3 *start, DDSVector3 *end, unsigned char *index) { register ssize_t i; size_t c, maxError = SIZE_MAX; for (i=0; i < 2; i++) { const DDSSourceBlock* sources[3]; size_t error = 0; for (c=0; c < 3; c++) { sources[c] = &lookup[c][color[c]].sources[i]; error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error); } if (error > maxError) continue; start->x = (float) sources[0]->start / 31.0f; start->y = (float) sources[1]->start / 63.0f; start->z = (float) sources[2]->start / 31.0f; end->x = (float) sources[0]->end / 31.0f; end->y = (float) sources[1]->end / 63.0f; end->z = (float) sources[2]->end / 31.0f; *index = (unsigned char) (2*i); maxError = error; } } static void ComputePrincipleComponent(const float *covariance, DDSVector3 *principle) { DDSVector4 row0, row1, row2, v; register ssize_t i; row0.x = covariance[0]; row0.y = covariance[1]; row0.z = covariance[2]; row0.w = 0.0f; row1.x = covariance[1]; row1.y = covariance[3]; row1.z = covariance[4]; row1.w = 0.0f; row2.x = covariance[2]; row2.y = covariance[4]; row2.z = covariance[5]; row2.w = 0.0f; VectorInit(v,1.0f); for (i=0; i < 8; i++) { DDSVector4 w; float a; w.x = row0.x * v.x; w.y = row0.y * v.x; w.z = row0.z * v.x; w.w = row0.w * v.x; w.x = (row1.x * v.y) + w.x; w.y = (row1.y * v.y) + w.y; w.z = (row1.z * v.y) + w.z; w.w = (row1.w * v.y) + w.w; w.x = (row2.x * v.z) + w.x; w.y = (row2.y * v.z) + w.y; w.z = (row2.z * v.z) + w.z; w.w = (row2.w * v.z) + w.w; a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z))); v.x = w.x * a; v.y = w.y * a; v.z = w.z * a; v.w = w.w * a; } VectorCopy43(v,principle); } static void ComputeWeightedCovariance(const size_t count, const DDSVector4 *points, float *covariance) { DDSVector3 centroid; float total; size_t i; total = 0.0f; VectorInit3(centroid,0.0f); for (i=0; i < count; i++) { total += points[i].w; centroid.x += (points[i].x * points[i].w); centroid.y += (points[i].y * points[i].w); 
centroid.z += (points[i].z * points[i].w); } if( total > 1.192092896e-07F) { centroid.x /= total; centroid.y /= total; centroid.z /= total; } for (i=0; i < 6; i++) covariance[i] = 0.0f; for (i = 0; i < count; i++) { DDSVector3 a, b; a.x = points[i].x - centroid.x; a.y = points[i].y - centroid.y; a.z = points[i].z - centroid.z; b.x = points[i].w * a.x; b.y = points[i].w * a.y; b.z = points[i].w * a.z; covariance[0] += a.x*b.x; covariance[1] += a.x*b.y; covariance[2] += a.x*b.z; covariance[3] += a.y*b.y; covariance[4] += a.y*b.z; covariance[5] += a.z*b.z; } } static MagickBooleanType ConstructOrdering(const size_t count, const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights, DDSVector4 *xSumwSum, unsigned char *order, size_t iteration) { float dps[16], f; register ssize_t i; size_t j; unsigned char c, *o, *p; o = order + (16*iteration); for (i=0; i < (ssize_t) count; i++) { dps[i] = Dot(points[i],axis); o[i] = (unsigned char)i; } for (i=0; i < (ssize_t) count; i++) { for (j=i; j > 0 && dps[j] < dps[j - 1]; j--) { f = dps[j]; dps[j] = dps[j - 1]; dps[j - 1] = f; c = o[j]; o[j] = o[j - 1]; o[j - 1] = c; } } for (i=0; i < (ssize_t) iteration; i++) { MagickBooleanType same; p = order + (16*i); same = MagickTrue; for (j=0; j < count; j++) { if (o[j] != p[j]) { same = MagickFalse; break; } } if (same != MagickFalse) return MagickFalse; } xSumwSum->x = 0; xSumwSum->y = 0; xSumwSum->z = 0; xSumwSum->w = 0; for (i=0; i < (ssize_t) count; i++) { DDSVector4 v; j = (size_t) o[i]; v.x = points[j].w * points[j].x; v.y = points[j].w * points[j].y; v.z = points[j].w * points[j].z; v.w = points[j].w * 1.0f; VectorCopy44(v,&pointsWeights[i]); VectorAdd(*xSumwSum,v,xSumwSum); } return MagickTrue; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s D D S % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsDDS() returns MagickTrue if the image format type, identified by 
the % magick string, is DDS. % % The format of the IsDDS method is: % % MagickBooleanType IsDDS(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((char *) magick,"DDS ", 4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadDDSImage() reads a DirectDraw Surface image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadDDSImage method is: % % Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: The image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType status, cubemap = MagickFalse, volume = MagickFalse, matte; CompressionType compression; DDSInfo dds_info; DDSDecoder *decoder; size_t n, num_images; /* Open image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Initialize image structure. */ if (ReadDDSInfo(image, &dds_info) != MagickTrue) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP) cubemap = MagickTrue; if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0) volume = MagickTrue; (void) SeekBlob(image, 128, SEEK_SET); /* Determine pixel format */ if (dds_info.pixelformat.flags & DDPF_RGB) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { matte = MagickTrue; decoder = ReadUncompressedRGBA; } else { matte = MagickTrue; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_LUMINANCE) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { /* Not sure how to handle this */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } else { matte = MagickFalse; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_FOURCC) { switch (dds_info.pixelformat.fourcc) { case FOURCC_DXT1: { matte = MagickFalse; compression = DXT1Compression; decoder = ReadDXT1; break; } case FOURCC_DXT3: { matte = MagickTrue; compression = DXT3Compression; decoder = ReadDXT3; break; } case FOURCC_DXT5: { matte = MagickTrue; compression = DXT5Compression; decoder = ReadDXT5; break; } default: { /* Unknown FOURCC */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } } } else { /* Neither compressed nor uncompressed... 
thus unsupported */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } num_images = 1; if (cubemap) { /* Determine number of faces defined in the cubemap */ num_images = 0; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++; } if (volume) num_images = dds_info.depth; if ((num_images == 0) || (num_images > GetBlobSize(image))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse) ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit"); for (n = 0; n < num_images; n++) { if (n != 0) { if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); /* Start a new image */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } image->matte = matte; image->compression = compression; image->columns = dds_info.width; image->rows = dds_info.height; image->storage_class = DirectClass; image->endian = LSBEndian; image->depth = 8; if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } (void) SetImageBackgroundColor(image); if ((decoder)(image, &dds_info, exception) != MagickTrue) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } } (void) CloseBlob(image); return(GetFirstImageInList(image)); } static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info) { size_t 
hdr_size, required; /* Seek to start of header */ (void) SeekBlob(image, 4, SEEK_SET); /* Check header field */ hdr_size = ReadBlobLSBLong(image); if (hdr_size != 124) return MagickFalse; /* Fill in DDS info struct */ dds_info->flags = ReadBlobLSBLong(image); /* Check required flags */ required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); if ((dds_info->flags & required) != required) return MagickFalse; dds_info->height = ReadBlobLSBLong(image); dds_info->width = ReadBlobLSBLong(image); dds_info->pitchOrLinearSize = ReadBlobLSBLong(image); dds_info->depth = ReadBlobLSBLong(image); dds_info->mipmapcount = ReadBlobLSBLong(image); (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */ /* Read pixel format structure */ hdr_size = ReadBlobLSBLong(image); if (hdr_size != 32) return MagickFalse; dds_info->pixelformat.flags = ReadBlobLSBLong(image); dds_info->pixelformat.fourcc = ReadBlobLSBLong(image); dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image); dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image); dds_info->ddscaps1 = ReadBlobLSBLong(image); dds_info->ddscaps2 = ReadBlobLSBLong(image); (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */ return MagickTrue; } static MagickBooleanType ReadDXT1(Image *image,DDSInfo *dds_info, ExceptionInfo *exception) { DDSColors colors; PixelPacket *q; register ssize_t i, x; size_t bits; ssize_t j, y; unsigned char code; unsigned short c0, c1; for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q == (PixelPacket *) NULL) return MagickFalse; /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); 
c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickFalse); if (EOFBlob(image) != MagickFalse) break; /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if (((x + i) < (ssize_t) image->columns) && ((y + j) < (ssize_t) image->rows)) { code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3); SetPixelRed(q,ScaleCharToQuantum(colors.r[code])); SetPixelGreen(q,ScaleCharToQuantum(colors.g[code])); SetPixelBlue(q,ScaleCharToQuantum(colors.b[code])); SetPixelOpacity(q,ScaleCharToQuantum(colors.a[code])); if ((colors.a[code] != 0) && (image->matte == MagickFalse)) image->matte=MagickTrue; /* Correct matte */ q++; } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } if (EOFBlob(image) != MagickFalse) break; } return(SkipDXTMipmaps(image,dds_info,8,exception)); } static MagickBooleanType ReadDXT3(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { DDSColors colors; ssize_t j, y; PixelPacket *q; register ssize_t i, x; unsigned char alpha; size_t a0, a1, bits, code; unsigned short c0, c1; for (y = 0; y < (ssize_t) dds_info->height; y += 4) { for (x = 0; x < (ssize_t) dds_info->width; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x), MagickMin(4, dds_info->height - y),exception); if (q == (PixelPacket *) NULL) return MagickFalse; /* Read alpha values (8 bytes) */ a0 = ReadBlobLSBLong(image); a1 = ReadBlobLSBLong(image); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); if (EOFBlob(image) != MagickFalse) break; /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(q,ScaleCharToQuantum(colors.r[code])); 
SetPixelGreen(q,ScaleCharToQuantum(colors.g[code])); SetPixelBlue(q,ScaleCharToQuantum(colors.b[code])); /* Extract alpha value: multiply 0..15 by 17 to get range 0..255 */ if (j < 2) alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf); else alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf); SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) alpha)); q++; } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } if (EOFBlob(image) != MagickFalse) break; } return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadDXT5(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { DDSColors colors; ssize_t j, y; MagickSizeType alpha_bits; PixelPacket *q; register ssize_t i, x; unsigned char a0, a1; size_t alpha, bits, code, alpha_code; unsigned short c0, c1; for (y = 0; y < (ssize_t) dds_info->height; y += 4) { for (x = 0; x < (ssize_t) dds_info->width; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x), MagickMin(4, dds_info->height - y),exception); if (q == (PixelPacket *) NULL) return MagickFalse; /* Read alpha values (8 bytes) */ a0 = (unsigned char) ReadBlobByte(image); a1 = (unsigned char) ReadBlobByte(image); alpha_bits = (MagickSizeType)ReadBlobLSBLong(image); alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); if (EOFBlob(image) != MagickFalse) break; /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(q,ScaleCharToQuantum(colors.r[code])); SetPixelGreen(q,ScaleCharToQuantum(colors.g[code])); SetPixelBlue(q,ScaleCharToQuantum(colors.b[code])); /* Extract alpha value */ 
alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7; if (alpha_code == 0) alpha = a0; else if (alpha_code == 1) alpha = a1; else if (a0 > a1) alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7; else if (alpha_code == 6) alpha = 0; else if (alpha_code == 7) alpha = 255; else alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5); SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) alpha)); q++; } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } if (EOFBlob(image) != MagickFalse) break; } return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadUncompressedRGB(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { PixelPacket *q; ssize_t x, y; unsigned short color; if (dds_info->pixelformat.rgb_bitcount == 8) (void) SetImageType(image,GrayscaleType); else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask( dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000)) ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); for (y = 0; y < (ssize_t) dds_info->height; y++) { q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception); if (q == (PixelPacket *) NULL) return MagickFalse; for (x = 0; x < (ssize_t) dds_info->width; x++) { if (dds_info->pixelformat.rgb_bitcount == 8) SetPixelGray(q,ScaleCharToQuantum(ReadBlobByte(image))); else if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); SetPixelRed(q,ScaleCharToQuantum((unsigned char) (((color >> 11)/31.0)*255))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 5) >> 10)/63.0)*255))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255))); } else { SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); if (dds_info->pixelformat.rgb_bitcount == 
32) (void) ReadBlobByte(image); } SetPixelAlpha(q,QuantumRange); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } return(SkipRGBMipmaps(image,dds_info,3,exception)); } static MagickBooleanType ReadUncompressedRGBA(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { PixelPacket *q; ssize_t alphaBits, x, y; unsigned short color; alphaBits=0; if (dds_info->pixelformat.rgb_bitcount == 16) { if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000)) alphaBits=1; else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00)) { alphaBits=2; (void) SetImageType(image,GrayscaleMatteType); } else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000)) alphaBits=4; else ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); } for (y = 0; y < (ssize_t) dds_info->height; y++) { q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception); if (q == (PixelPacket *) NULL) return MagickFalse; for (x = 0; x < (ssize_t) dds_info->width; x++) { if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); if (alphaBits == 1) { SetPixelAlpha(q,(color & (1 << 15)) ? 
QuantumRange : 0); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 1) >> 11)/31.0)*255))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 6) >> 11)/31.0)*255))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255))); } else if (alphaBits == 2) { SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) (color >> 8))); SetPixelGray(q,ScaleCharToQuantum((unsigned char)color)); } else { SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) (((color >> 12)/15.0)*255))); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 4) >> 12)/15.0)*255))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 8) >> 12)/15.0)*255))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 12) >> 12)/15.0)*255))); } } else { SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); } q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } return(SkipRGBMipmaps(image,dds_info,4,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterDDSImage() adds attributes for the DDS image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
% % The format of the RegisterDDSImage method is: % % RegisterDDSImage(void) % */ ModuleExport size_t RegisterDDSImage(void) { MagickInfo *entry; entry = SetMagickInfo("DDS"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->seekable_stream=MagickTrue; entry->description = ConstantString("Microsoft DirectDraw Surface"); entry->magick_module = ConstantString("DDS"); (void) RegisterMagickInfo(entry); entry = SetMagickInfo("DXT1"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->seekable_stream=MagickTrue; entry->description = ConstantString("Microsoft DirectDraw Surface"); entry->magick_module = ConstantString("DDS"); (void) RegisterMagickInfo(entry); entry = SetMagickInfo("DXT5"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->seekable_stream=MagickTrue; entry->description = ConstantString("Microsoft DirectDraw Surface"); entry->magick_module = ConstantString("DDS"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } static void RemapIndices(const ssize_t *map, const unsigned char *source, unsigned char *target) { register ssize_t i; for (i = 0; i < 16; i++) { if (map[i] == -1) target[i] = 3; else target[i] = source[map[i]]; } } /* Skip the mipmap images for compressed (DXTn) dds files */ static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info, int texel_size,ExceptionInfo *exception) { register ssize_t i; MagickOffsetType offset; size_t h, w; /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && 
(dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { w = DIV2(dds_info->width); h = DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset = (MagickOffsetType) ((w + 3) / 4) * ((h + 3) / 4) * texel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; if ((w == 1) && (h == 1)) break; w = DIV2(w); h = DIV2(h); } } return(MagickTrue); } /* Skip the mipmap images for uncompressed (RGB or RGBA) dds files */ static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info, int pixel_size,ExceptionInfo *exception) { MagickOffsetType offset; register ssize_t i; size_t h, w; /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { w = DIV2(dds_info->width); h = DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset = (MagickOffsetType) w * h * pixel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w = DIV2(w); h = DIV2(h); if ((w == 1) && (h == 1)) break; } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterDDSImage() removes format registrations made by the % DDS module from the list of supported formats. 
% % The format of the UnregisterDDSImage method is: % % UnregisterDDSImage(void) % */ ModuleExport void UnregisterDDSImage(void) { (void) UnregisterMagickInfo("DDS"); (void) UnregisterMagickInfo("DXT1"); (void) UnregisterMagickInfo("DXT5"); } static void WriteAlphas(Image *image, const ssize_t* alphas, size_t min5, size_t max5, size_t min7, size_t max7) { register ssize_t i; size_t err5, err7, j; unsigned char indices5[16], indices7[16]; FixRange(min5,max5,5); err5 = CompressAlpha(min5,max5,5,alphas,indices5); FixRange(min7,max7,7); err7 = CompressAlpha(min7,max7,7,alphas,indices7); if (err7 < err5) { for (i=0; i < 16; i++) { unsigned char index; index = indices7[i]; if( index == 0 ) indices5[i] = 1; else if (index == 1) indices5[i] = 0; else indices5[i] = 9 - index; } min5 = max7; max5 = min7; } (void) WriteBlobByte(image,(unsigned char) min5); (void) WriteBlobByte(image,(unsigned char) max5); for(i=0; i < 2; i++) { size_t value = 0; for (j=0; j < 8; j++) { size_t index = (size_t) indices5[j + i*8]; value |= ( index << 3*j ); } for (j=0; j < 3; j++) { size_t byte = (value >> 8*j) & 0xff; (void) WriteBlobByte(image,(unsigned char) byte); } } } static void WriteCompressed(Image *image, const size_t count, DDSVector4* points, const ssize_t* map, const MagickBooleanType clusterFit) { float covariance[16]; DDSVector3 end, principle, start; DDSVector4 metric; unsigned char indices[16]; VectorInit(metric,1.0f); VectorInit3(start,0.0f); VectorInit3(end,0.0f); ComputeWeightedCovariance(count,points,covariance); ComputePrincipleComponent(covariance,&principle); if (clusterFit == MagickFalse || count == 0) CompressRangeFit(count,points,map,principle,metric,&start,&end,indices); else CompressClusterFit(count,points,map,principle,metric,&start,&end,indices); WriteIndices(image,start,end,indices); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e D D S I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format. % % The format of the WriteBMPImage method is: % % MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % */ static MagickBooleanType WriteDDSImage(const ImageInfo *image_info, Image *image) { const char *option; size_t compression, columns, maxMipmaps, mipmaps, pixelFormat, rows; MagickBooleanType clusterFit, status, weightByAlpha; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); (void) TransformImageColorspace(image,sRGBColorspace); pixelFormat=DDPF_FOURCC; compression=FOURCC_DXT5; if (!image->matte) compression=FOURCC_DXT1; if (LocaleCompare(image_info->magick,"dxt1") == 0) compression=FOURCC_DXT1; option=GetImageOption(image_info,"dds:compression"); if (option != (char *) NULL) { if (LocaleCompare(option,"dxt1") == 0) compression=FOURCC_DXT1; if (LocaleCompare(option,"none") == 0) pixelFormat=DDPF_RGB; } clusterFit=MagickFalse; weightByAlpha=MagickFalse; if (pixelFormat == DDPF_FOURCC) { option=GetImageOption(image_info,"dds:cluster-fit"); if (IsStringTrue(option) != MagickFalse) { clusterFit=MagickTrue; if (compression != FOURCC_DXT1) { option=GetImageOption(image_info,"dds:weight-by-alpha"); if (IsStringTrue(option) != MagickFalse) weightByAlpha=MagickTrue; } } } maxMipmaps=SIZE_MAX; mipmaps=0; if ((image->columns & (image->columns - 1)) == 0 && (image->rows & (image->rows - 1)) == 0) { 
option=GetImageOption(image_info,"dds:mipmaps"); if (option != (char *) NULL) maxMipmaps=StringToUnsignedLong(option); if (maxMipmaps != 0) { columns=image->columns; rows=image->rows; while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps) { columns=DIV2(columns); rows=DIV2(rows); mipmaps++; } } } WriteDDSInfo(image,pixelFormat,compression,mipmaps); WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha, &image->exception); if (mipmaps > 0 && WriteMipmaps(image,pixelFormat,compression,mipmaps, clusterFit,weightByAlpha,&image->exception) == MagickFalse) return(MagickFalse); (void) CloseBlob(image); return(MagickTrue); } static void WriteDDSInfo(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps) { char software[MaxTextExtent]; register ssize_t i; unsigned int format, caps, flags; flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); caps=(unsigned int) DDSCAPS_TEXTURE; format=(unsigned int) pixelFormat; if (format == DDPF_FOURCC) flags=flags | DDSD_LINEARSIZE; else flags=flags | DDSD_PITCH; if (mipmaps > 0) { flags=flags | (unsigned int) DDSD_MIPMAPCOUNT; caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX); } if (format != DDPF_FOURCC && image->matte) format=format | DDPF_ALPHAPIXELS; (void) WriteBlob(image,4,(unsigned char *) "DDS "); (void) WriteBlobLSBLong(image,124); (void) WriteBlobLSBLong(image,flags); (void) WriteBlobLSBLong(image,(unsigned int) image->rows); (void) WriteBlobLSBLong(image,(unsigned int) image->columns); if (pixelFormat == DDPF_FOURCC) { /* Compressed DDS requires linear compressed size of first image */ if (compression == FOURCC_DXT1) (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8)); else /* DXT5 */ (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16)); } else { /* Uncompressed DDS requires byte pitch of first image */ if 
(image->matte != MagickFalse) (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4)); else (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3)); } (void) WriteBlobLSBLong(image,0x00); (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1); (void) memset(software,0,sizeof(software)); (void) CopyMagickString(software,"IMAGEMAGICK",MaxTextExtent); (void) WriteBlob(image,44,(unsigned char *) software); (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,format); if (pixelFormat == DDPF_FOURCC) { (void) WriteBlobLSBLong(image,(unsigned int) compression); for(i=0;i < 5;i++) /* bitcount / masks */ (void) WriteBlobLSBLong(image,0x00); } else { (void) WriteBlobLSBLong(image,0x00); if (image->matte != MagickFalse) { (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0xff000000); } else { (void) WriteBlobLSBLong(image,24); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0x00); } } (void) WriteBlobLSBLong(image,caps); for(i=0;i < 4;i++) /* ddscaps2 + reserved region */ (void) WriteBlobLSBLong(image,0x00); } static void WriteFourCC(Image *image, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { register const PixelPacket *p; register ssize_t x; ssize_t i, y, bx, by; for (y=0; y < (ssize_t) image->rows; y+=4) { for (x=0; x < (ssize_t) image->columns; x+=4) { MagickBooleanType match; DDSVector4 point, points[16]; size_t count = 0, max5 = 0, max7 = 0, min5 = 255, min7 = 255, columns = 4, rows = 4; ssize_t alphas[16], map[16]; unsigned char alpha; if (x + columns >= image->columns) columns = image->columns - x; if (y + rows >= image->rows) rows = image->rows - y; p=GetVirtualPixels(image,x,y,columns,rows,exception); if (p == (const 
PixelPacket *) NULL) break; for (i=0; i<16; i++) { map[i] = -1; alphas[i] = -1; } for (by=0; by < (ssize_t) rows; by++) { for (bx=0; bx < (ssize_t) columns; bx++) { if (compression == FOURCC_DXT5) alpha = ScaleQuantumToChar(GetPixelAlpha(p)); else alpha = 255; if (compression == FOURCC_DXT5) { if (alpha < min7) min7 = alpha; if (alpha > max7) max7 = alpha; if (alpha != 0 && alpha < min5) min5 = alpha; if (alpha != 255 && alpha > max5) max5 = alpha; } alphas[4*by + bx] = (size_t)alpha; point.x = (float)ScaleQuantumToChar(GetPixelRed(p)) / 255.0f; point.y = (float)ScaleQuantumToChar(GetPixelGreen(p)) / 255.0f; point.z = (float)ScaleQuantumToChar(GetPixelBlue(p)) / 255.0f; point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f; p++; match = MagickFalse; for (i=0; i < (ssize_t) count; i++) { if ((points[i].x == point.x) && (points[i].y == point.y) && (points[i].z == point.z) && (alpha >= 128 || compression == FOURCC_DXT5)) { points[i].w += point.w; map[4*by + bx] = i; match = MagickTrue; break; } } if (match != MagickFalse) continue; points[count].x = point.x; points[count].y = point.y; points[count].z = point.z; points[count].w = point.w; map[4*by + bx] = count; count++; } } for (i=0; i < (ssize_t) count; i++) points[i].w = sqrt(points[i].w); if (compression == FOURCC_DXT5) WriteAlphas(image,alphas,min5,max5,min7,max7); if (count == 1) WriteSingleColorFit(image,points,map); else WriteCompressed(image,count,points,map,clusterFit); } } } static void WriteImageData(Image *image, const size_t pixelFormat, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { if (pixelFormat == DDPF_FOURCC) WriteFourCC(image,compression,clusterFit,weightByAlpha,exception); else WriteUncompressed(image,exception); } static inline size_t ClampToLimit(const float value, const size_t limit) { size_t result = (int) (value + 0.5f); if (result < 0.0f) return(0); if (result > limit) return(limit); return result; } 
static inline size_t ColorTo565(const DDSVector3 point) { size_t r = ClampToLimit(31.0f*point.x,31); size_t g = ClampToLimit(63.0f*point.y,63); size_t b = ClampToLimit(31.0f*point.z,31); return (r << 11) | (g << 5) | b; } static void WriteIndices(Image *image, const DDSVector3 start, const DDSVector3 end, unsigned char* indices) { register ssize_t i; size_t a, b; unsigned char remapped[16]; const unsigned char *ind; a = ColorTo565(start); b = ColorTo565(end); for (i=0; i<16; i++) { if( a < b ) remapped[i] = (indices[i] ^ 0x1) & 0x3; else if( a == b ) remapped[i] = 0; else remapped[i] = indices[i]; } if( a < b ) Swap(a,b); (void) WriteBlobByte(image,(unsigned char) (a & 0xff)); (void) WriteBlobByte(image,(unsigned char) (a >> 8)); (void) WriteBlobByte(image,(unsigned char) (b & 0xff)); (void) WriteBlobByte(image,(unsigned char) (b >> 8)); for (i=0; i<4; i++) { ind = remapped + 4*i; (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) | (ind[3] << 6)); } } static MagickBooleanType WriteMipmaps(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { Image* resize_image; register ssize_t i; size_t columns, rows; columns = image->columns; rows = image->rows; for (i=0; i< (ssize_t) mipmaps; i++) { resize_image = ResizeImage(image,DIV2(columns),DIV2(rows),TriangleFilter,1.0, exception); if (resize_image == (Image *) NULL) return(MagickFalse); DestroyBlob(resize_image); resize_image->blob=ReferenceBlob(image->blob); WriteImageData(resize_image,pixelFormat,compression,weightByAlpha, clusterFit,exception); resize_image=DestroyImage(resize_image); columns = DIV2(columns); rows = DIV2(rows); } return(MagickTrue); } static void WriteSingleColorFit(Image *image, const DDSVector4* points, const ssize_t* map) { DDSVector3 start, end; register ssize_t i; unsigned char color[3], index, indexes[16], indices[16]; color[0] = (unsigned char) 
ClampToLimit(255.0f*points->x,255); color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255); color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255); index=0; ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index); for (i=0; i< 16; i++) indexes[i]=index; RemapIndices(map,indexes,indices); WriteIndices(image,start,end,indices); } static void WriteUncompressed(Image *image, ExceptionInfo *exception) { register const PixelPacket *p; register ssize_t x; ssize_t y; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(p))); if (image->matte) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(p))); p++; } } }
/* fx.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/fx-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. 
*/ #define LeftShiftOperator 0xf5U #define RightShiftOperator 0xf6U #define LessThanEqualOperator 0xf7U #define GreaterThanEqualOperator 0xf8U #define EqualOperator 0xf9U #define NotEqualOperator 0xfaU #define LogicalAndOperator 0xfbU #define LogicalOrOperator 0xfcU #define ExponentialNotation 0xfdU struct _FxInfo { const Image *images; char *expression; FILE *file; SplayTreeInfo *colors, *symbols; CacheView **view; RandomInfo *random_info; ExceptionInfo *exception; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireFxInfo() allocates the FxInfo structure. % % The format of the AcquireFxInfo method is: % % FxInfo *AcquireFxInfo(Image *image,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o expression: the expression. % % o exception: return any errors or warnings in this structure. 
% */ MagickPrivate FxInfo *AcquireFxInfo(const Image *image,const char *expression, ExceptionInfo *exception) { char fx_op[2]; const Image *next; FxInfo *fx_info; register ssize_t i; fx_info=(FxInfo *) AcquireMagickMemory(sizeof(*fx_info)); if (fx_info == (FxInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(fx_info,0,sizeof(*fx_info)); fx_info->exception=AcquireExceptionInfo(); fx_info->images=image; fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, RelinquishAlignedMemory); fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, RelinquishMagickMemory); fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength( fx_info->images),sizeof(*fx_info->view)); if (fx_info->view == (CacheView **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); i=0; next=GetFirstImageInList(fx_info->images); for ( ; next != (Image *) NULL; next=next->next) { fx_info->view[i]=AcquireVirtualCacheView(next,exception); i++; } fx_info->random_info=AcquireRandomInfo(); fx_info->expression=ConstantString(expression); fx_info->file=stderr; (void) SubstituteString(&fx_info->expression," ",""); /* compact string */ /* Force right-to-left associativity for unary negation. */ (void) SubstituteString(&fx_info->expression,"-","-1.0*"); (void) SubstituteString(&fx_info->expression,"^-1.0*","^-"); (void) SubstituteString(&fx_info->expression,"E-1.0*","E-"); (void) SubstituteString(&fx_info->expression,"e-1.0*","e-"); /* Convert compound to simple operators. 
*/ fx_op[1]='\0'; *fx_op=(char) LeftShiftOperator; (void) SubstituteString(&fx_info->expression,"<<",fx_op); *fx_op=(char) RightShiftOperator; (void) SubstituteString(&fx_info->expression,">>",fx_op); *fx_op=(char) LessThanEqualOperator; (void) SubstituteString(&fx_info->expression,"<=",fx_op); *fx_op=(char) GreaterThanEqualOperator; (void) SubstituteString(&fx_info->expression,">=",fx_op); *fx_op=(char) EqualOperator; (void) SubstituteString(&fx_info->expression,"==",fx_op); *fx_op=(char) NotEqualOperator; (void) SubstituteString(&fx_info->expression,"!=",fx_op); *fx_op=(char) LogicalAndOperator; (void) SubstituteString(&fx_info->expression,"&&",fx_op); *fx_op=(char) LogicalOrOperator; (void) SubstituteString(&fx_info->expression,"||",fx_op); *fx_op=(char) ExponentialNotation; (void) SubstituteString(&fx_info->expression,"**",fx_op); return(fx_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d d N o i s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AddNoiseImage() adds random noise to the image. % % The format of the AddNoiseImage method is: % % Image *AddNoiseImage(const Image *image,const NoiseType noise_type, % const double attenuate,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o noise_type: The type of noise: Uniform, Gaussian, Multiplicative, % Impulse, Laplacian, or Poisson. % % o attenuate: attenuate the random distribution. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type, const double attenuate,ExceptionInfo *exception) { #define AddNoiseImageTag "AddNoise/Image" CacheView *image_view, *noise_view; Image *noise_image; MagickBooleanType status; MagickOffsetType progress; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Initialize noise image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) noise_image=AccelerateAddNoiseImage(image,noise_type,exception); if (noise_image != (Image *) NULL) return(noise_image); #endif noise_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (noise_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse) { noise_image=DestroyImage(noise_image); return((Image *) NULL); } /* Add noise in each row. 
*/ status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireVirtualCacheView(image,exception); noise_view=AcquireAuthenticCacheView(noise_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,noise_image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel); if ((traits == UndefinedPixelTrait) || (noise_traits == UndefinedPixelTrait)) continue; if (((noise_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) == 0)) { SetPixelChannel(noise_image,channel,p[i],q); continue; } SetPixelChannel(noise_image,channel,ClampToQuantum( GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)), q); } p+=GetPixelChannels(image); q+=GetPixelChannels(noise_image); } sync=SyncCacheViewAuthenticPixels(noise_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AddNoiseImage) #endif proceed=SetImageProgress(image,AddNoiseImageTag,progress++, image->rows); if (proceed == 
MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  /* on any row failure release the partial result; caller receives NULL */
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u e S h i f t I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlueShiftImage() mutes the colors of the image to simulate a scene at
%  nighttime in the moonlight.
%
%  The format of the BlueShiftImage method is:
%
%      Image *BlueShiftImage(const Image *image,const double factor,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o factor: the shift factor.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag  "BlueShift/Image"

  CacheView
    *image_view,
    *shift_view;

  Image
    *shift_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate blue shift image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  shift_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse)
    {
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  /* source is read through a virtual view, destination written authentically */
  image_view=AcquireVirtualCacheView(image,exception);
  shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,shift_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    PixelInfo
      pixel;

    Quantum
      quantum;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        First pass: blend each channel toward the darkest of R, G, and B,
        weighted by factor.
      */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) < quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) < quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum);
      pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum);
      pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum);
      /*
        Second pass: blend the result toward the brightest of R, G, and B.
      */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) > quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) > quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetPixelRed(shift_image,ClampToQuantum(pixel.red),q);
      SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q);
      SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(shift_image);
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlueShiftImage)
#endif
        proceed=SetImageProgress(image,BlueShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C h a r c o a l I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CharcoalImage() creates a new image that is a copy of an existing one with
%  the edge highlighted.  It allocates the memory necessary for the new Image
%  structure and returns a pointer to the new image.
%
%  The format of the CharcoalImage method is:
%
%      Image *CharcoalImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *charcoal_image,
    *clone_image,
    *edge_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Charcoal pipeline: edge-detect a clone of the input, blur the edge image,
    then normalize, negate, and grayscale the result.  Each stage frees its
    input image once the next stage has been produced.
  */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  edge_image=EdgeImage(clone_image,radius,exception);
  clone_image=DestroyImage(clone_image);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  charcoal_image=BlurImage(edge_image,radius,sigma,exception);
  edge_image=DestroyImage(edge_image);
  if (charcoal_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(charcoal_image,exception);
  (void) NegateImage(charcoal_image,MagickFalse,exception);
  (void) GrayscaleImage(charcoal_image,image->intensity,exception);
  return(charcoal_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorizeImage() blends the fill color with each pixel in the image.
%  A percentage blend is specified with opacity.  Control the application
%  of different color components by specifying a different percentage for
%  each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
%  The format of the ColorizeImage method is:
%
%      Image *ColorizeImage(const Image *image,const char *blend,
%        const PixelInfo *colorize,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o blend:  A character string indicating the level of blending as a
%      percentage.
%
%    o colorize: A color value.
% % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ColorizeImage(const Image *image,const char *blend, const PixelInfo *colorize,ExceptionInfo *exception) { #define ColorizeImageTag "Colorize/Image" #define Colorize(pixel,blend_percentage,colorize) \ (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0) CacheView *image_view; GeometryInfo geometry_info; Image *colorize_image; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; PixelInfo blend_percentage; ssize_t y; /* Allocate colorized image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); colorize_image=CloneImage(image,0,0,MagickTrue,exception); if (colorize_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse) { colorize_image=DestroyImage(colorize_image); return((Image *) NULL); } if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) || (IsPixelInfoGray(colorize) != MagickFalse)) (void) SetImageColorspace(colorize_image,sRGBColorspace,exception); if ((colorize_image->alpha_trait == UndefinedPixelTrait) && (colorize->alpha_trait != UndefinedPixelTrait)) (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception); if (blend == (const char *) NULL) return(colorize_image); GetPixelInfo(colorize_image,&blend_percentage); flags=ParseGeometry(blend,&geometry_info); blend_percentage.red=geometry_info.rho; blend_percentage.green=geometry_info.rho; blend_percentage.blue=geometry_info.rho; blend_percentage.black=geometry_info.rho; blend_percentage.alpha=(MagickRealType) TransparentAlpha; if ((flags & SigmaValue) != 0) blend_percentage.green=geometry_info.sigma; if ((flags & XiValue) != 0) 
blend_percentage.blue=geometry_info.xi; if ((flags & PsiValue) != 0) blend_percentage.alpha=geometry_info.psi; if (blend_percentage.colorspace == CMYKColorspace) { if ((flags & PsiValue) != 0) blend_percentage.black=geometry_info.psi; if ((flags & ChiValue) != 0) blend_percentage.alpha=geometry_info.chi; } /* Colorize DirectClass image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(colorize_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(colorize_image,colorize_image,colorize_image->rows,1) #endif for (y=0; y < (ssize_t) colorize_image->rows; y++) { MagickBooleanType sync; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) colorize_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++) { PixelTrait traits=GetPixelChannelTraits(colorize_image, (PixelChannel) i); if (traits == UndefinedPixelTrait) continue; if (((traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(colorize_image,q) == 0)) continue; SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum( Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i), GetPixelInfoChannel(colorize,(PixelChannel) i))),q); } q+=GetPixelChannels(colorize_image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ColorizeImage) #endif proceed=SetImageProgress(image,ColorizeImageTag,progress++, colorize_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); if (status == 
MagickFalse) colorize_image=DestroyImage(colorize_image); return(colorize_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r M a t r i x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorMatrixImage() applies color transformation to an image. This method % permits saturation changes, hue rotation, luminance to alpha, and various % other effects. Although variable-sized transformation matrices can be used, % typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA % (or RGBA with offsets). The matrix is similar to those used by Adobe Flash % except offsets are in column 6 rather than 5 (in support of CMYKA images) % and offsets are normalized (divide Flash offset by 255). % % The format of the ColorMatrixImage method is: % % Image *ColorMatrixImage(const Image *image, % const KernelInfo *color_matrix,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o color_matrix: the color matrix. % % o exception: return any errors or warnings in this structure. 
%
*/
/* FUTURE: modify to make use of a MagickMatrix Mutliply function
   That should be provided in "matrix.c"
   (ASIDE: actually distorts should do this too but currently doesn't)
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag  "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  double
    ColorMatrix[6][6] =
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    u,
    v,
    y;

  /*
    Map given color_matrix, into a 6x6 matrix RGBKA and a constant
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  i=0;
  /* copy the user matrix over the identity; entries beyond 6x6 are skipped */
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse)
    {
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        *message;

      /* dump the effective 6x6 matrix, one row per log line */
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Apply the ColorMatrix to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        v;

      size_t
        height;

      GetPixelInfoPixel(image,p,&pixel);
      /* only the first 6 matrix rows can ever be applied */
      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      for (v=0; v < (ssize_t) height; v++)
      {
        double
          sum;

        /* row v maps (R,G,B[,K][,A],1) onto one output channel */
        sum=ColorMatrix[v][0]*GetPixelRed(image,p)+ColorMatrix[v][1]*
          GetPixelGreen(image,p)+ColorMatrix[v][2]*GetPixelBlue(image,p);
        if (image->colorspace == CMYKColorspace)
          sum+=ColorMatrix[v][3]*GetPixelBlack(image,p);
        if (image->alpha_trait != UndefinedPixelTrait)
          sum+=ColorMatrix[v][4]*GetPixelAlpha(image,p);
        /* column 6 holds the normalized offset term */
        sum+=QuantumRange*ColorMatrix[v][5];
        switch (v)
        {
          case 0: pixel.red=sum; break;
          case 1: pixel.green=sum; break;
          case 2: pixel.blue=sum; break;
          case 3: pixel.black=sum; break;
          case 4: pixel.alpha=sum; break;
          default: break;
        }
      }
      SetPixelViaPixelInfo(color_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(color_image);
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorMatrixImage)
#endif
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y F x I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
%  The format of the DestroyFxInfo method is:
%
%      ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  register ssize_t
    i;

  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  /* one cache view was acquired per image in the list; release each one */
  for (i=(ssize_t) GetImageListLength(fx_info->images)-1; i >= 0; i--)
    fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  /* always NULL after RelinquishMagickMemory; callers may reassign */
  return(fx_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   F x E v a l u a t e C h a n n e l E x p r e s s i o n                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxEvaluateChannelExpression() evaluates an expression and returns the
%  results.
%
%  The format of the FxEvaluateExpression method is:
%
%      double FxEvaluateChannelExpression(FxInfo *fx_info,
%        const PixelChannel channel,const ssize_t x,const ssize_t y,
%        double *alpha,Exceptioninfo *exception)
%      double FxEvaluateExpression(FxInfo *fx_info,
%        double *alpha,Exceptioninfo *exception)
%
%  A description of each parameter follows:
%
%    o fx_info: the fx info.
%
%    o channel: the channel.
%
%    o x,y: the pixel position.
%
%    o alpha: the result.
%
%    o exception: return any errors or warnings in this structure.
% */ static double FxChannelStatistics(FxInfo *fx_info,Image *image, PixelChannel channel,const char *symbol,ExceptionInfo *exception) { ChannelType channel_mask; char key[MagickPathExtent], statistic[MagickPathExtent]; const char *value; register const char *p; channel_mask=UndefinedChannel; for (p=symbol; (*p != '.') && (*p != '\0'); p++) ; if (*p == '.') { ssize_t option; option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1); if (option >= 0) { channel=(PixelChannel) option; channel_mask=(ChannelType) (channel_mask | (1 << channel)); (void) SetPixelChannelMask(image,channel_mask); } } (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image, (double) channel,symbol); value=(const char *) GetValueFromSplayTree(fx_info->symbols,key); if (value != (const char *) NULL) { if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); return(QuantumScale*StringToDouble(value,(char **) NULL)); } (void) DeleteNodeFromSplayTree(fx_info->symbols,key); if (LocaleNCompare(symbol,"depth",5) == 0) { size_t depth; depth=GetImageDepth(image,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",(double) depth); } if (LocaleNCompare(symbol,"kurtosis",8) == 0) { double kurtosis, skewness; (void) GetImageKurtosis(image,&kurtosis,&skewness,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%g",kurtosis); } if (LocaleNCompare(symbol,"maxima",6) == 0) { double maxima, minima; (void) GetImageRange(image,&minima,&maxima,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%g",maxima); } if (LocaleNCompare(symbol,"mean",4) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%g",mean); } if (LocaleNCompare(symbol,"minima",6) == 0) { double maxima, minima; (void) GetImageRange(image,&minima,&maxima,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%g",minima); } if 
(LocaleNCompare(symbol,"skewness",8) == 0) { double kurtosis, skewness; (void) GetImageKurtosis(image,&kurtosis,&skewness,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%g",skewness); } if (LocaleNCompare(symbol,"standard_deviation",18) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%g", standard_deviation); } if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key), ConstantString(statistic)); return(QuantumScale*StringToDouble(statistic,(char **) NULL)); } static double FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t, const ssize_t,const char *,size_t *,double *,ExceptionInfo *); static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta) { if (beta != 0) return(FxGCD(beta,alpha % beta)); return(alpha); } static inline const char *FxSubexpression(const char *expression, ExceptionInfo *exception) { const char *subexpression; register ssize_t level; level=0; subexpression=expression; while ((*subexpression != '\0') && ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL))) { if (strchr("(",(int) *subexpression) != (char *) NULL) level++; else if (strchr(")",(int) *subexpression) != (char *) NULL) level--; subexpression++; } if (*subexpression == '\0') (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnbalancedParenthesis","`%s'",expression); return(subexpression); } static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel, const ssize_t x,const ssize_t y,const char *expression, ExceptionInfo *exception) { char *q, subexpression[MagickPathExtent], symbol[MagickPathExtent]; const char *p, *value; Image *image; PixelInfo pixel; double alpha, beta; PointInfo point; register ssize_t i; size_t depth, length, level; p=expression; i=GetImageIndexInList(fx_info->images); 
depth=0; level=0; point.x=(double) x; point.y=(double) y; if (isalpha((int) ((unsigned char) *(p+1))) == 0) { if (strchr("suv",(int) *p) != (char *) NULL) { switch (*p) { case 's': default: { i=GetImageIndexInList(fx_info->images); break; } case 'u': i=0; break; case 'v': i=1; break; } p++; if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, &depth,&beta,exception); i=(ssize_t) alpha; p++; } if (*p == '.') p++; } if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0)) { p++; if (*p == '{') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '{') level++; else if (*p == '}') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, &depth,&beta,exception); point.x=alpha; point.y=beta; p++; } else if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, &depth,&beta,exception); point.x+=alpha; point.y+=beta; p++; } if (*p == '.') p++; } } length=GetImageListLength(fx_info->images); while (i < 0) i+=(ssize_t) length; if (length != 0) i%=length; image=GetImageFromList(fx_info->images,i); if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "NoSuchImage","`%s'",expression); return(0.0); } GetPixelInfo(image,&pixel); (void) InterpolatePixelInfo(image,fx_info->view[i],image->interpolate, point.x,point.y,&pixel,exception); if ((strlen(p) > 2) && (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) && (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) && (LocaleCompare(p,"lightness") != 0)) { char 
name[MagickPathExtent]; (void) CopyMagickString(name,p,MagickPathExtent); for (q=name+(strlen(name)-1); q > name; q--) { if (*q == ')') break; if (*q == '.') { *q='\0'; break; } } if ((strlen(name) > 2) && (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL)) { PixelInfo *color; color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name); if (color != (PixelInfo *) NULL) { pixel=(*color); p+=strlen(name); } else { MagickBooleanType status; status=QueryColorCompliance(name,AllCompliance,&pixel, fx_info->exception); if (status != MagickFalse) { (void) AddValueToSplayTree(fx_info->colors,ConstantString( name),ClonePixelInfo(&pixel)); p+=strlen(name); } } } } (void) CopyMagickString(symbol,p,MagickPathExtent); StripString(symbol); if (*symbol == '\0') { switch (channel) { case RedPixelChannel: return(QuantumScale*pixel.red); case GreenPixelChannel: return(QuantumScale*pixel.green); case BluePixelChannel: return(QuantumScale*pixel.blue); case BlackPixelChannel: { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), ImageError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.black); } case AlphaPixelChannel: { if (pixel.alpha_trait == UndefinedPixelTrait) return(1.0); alpha=(double) (QuantumScale*pixel.alpha); return(alpha); } case IndexPixelChannel: return(0.0); case IntensityPixelChannel: { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image,&pixel,quantum_pixel); return(QuantumScale*GetPixelIntensity(image,quantum_pixel)); } default: break; } (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",p); return(0.0); } switch (*symbol) { case 'A': case 'a': { if (LocaleCompare(symbol,"a") == 0) return((QuantumScale*pixel.alpha)); break; } case 'B': case 'b': { if (LocaleCompare(symbol,"b") == 0) return(QuantumScale*pixel.blue); break; } case 'C': case 'c': { if (LocaleNCompare(symbol,"channel",7) 
== 0) { GeometryInfo channel_info; MagickStatusType flags; flags=ParseGeometry(symbol+7,&channel_info); if (image->colorspace == CMYKColorspace) switch (channel) { case CyanPixelChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case MagentaPixelChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case YellowPixelChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackPixelChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } case AlphaPixelChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } default: return(0.0); } switch (channel) { case RedPixelChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case GreenPixelChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case BluePixelChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackPixelChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } case AlphaPixelChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } default: return(0.0); } } if (LocaleCompare(symbol,"c") == 0) return(QuantumScale*pixel.red); break; } case 'D': case 'd': { if (LocaleNCompare(symbol,"depth",5) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'G': case 'g': { if (LocaleCompare(symbol,"g") == 0) return(QuantumScale*pixel.green); break; } case 'K': case 'k': { if (LocaleNCompare(symbol,"kurtosis",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"k") == 0) { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.black); } break; } case 'H': case 'h': { if (LocaleCompare(symbol,"h") == 0) return(image->rows); 
if (LocaleCompare(symbol,"hue") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(hue); } break; } case 'I': case 'i': { if ((LocaleCompare(symbol,"image.depth") == 0) || (LocaleCompare(symbol,"image.minima") == 0) || (LocaleCompare(symbol,"image.maxima") == 0) || (LocaleCompare(symbol,"image.mean") == 0) || (LocaleCompare(symbol,"image.kurtosis") == 0) || (LocaleCompare(symbol,"image.skewness") == 0) || (LocaleCompare(symbol,"image.standard_deviation") == 0)) return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception)); if (LocaleCompare(symbol,"image.resolution.x") == 0) return(image->resolution.x); if (LocaleCompare(symbol,"image.resolution.y") == 0) return(image->resolution.y); if (LocaleCompare(symbol,"intensity") == 0) { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image,&pixel,quantum_pixel); return(QuantumScale*GetPixelIntensity(image,quantum_pixel)); } if (LocaleCompare(symbol,"i") == 0) return(x); break; } case 'J': case 'j': { if (LocaleCompare(symbol,"j") == 0) return(y); break; } case 'L': case 'l': { if (LocaleCompare(symbol,"lightness") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(lightness); } if (LocaleCompare(symbol,"luma") == 0) { double luma; luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luma); } if (LocaleCompare(symbol,"luminance") == 0) { double luminence; luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luminence); } break; } case 'M': case 'm': { if (LocaleNCompare(symbol,"maxima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"mean",4) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"minima",6) == 0) 
return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"m") == 0) return(QuantumScale*pixel.green); break; } case 'N': case 'n': { if (LocaleCompare(symbol,"n") == 0) return(GetImageListLength(fx_info->images)); break; } case 'O': case 'o': { if (LocaleCompare(symbol,"o") == 0) return(QuantumScale*pixel.alpha); break; } case 'P': case 'p': { if (LocaleCompare(symbol,"page.height") == 0) return(image->page.height); if (LocaleCompare(symbol,"page.width") == 0) return(image->page.width); if (LocaleCompare(symbol,"page.x") == 0) return(image->page.x); if (LocaleCompare(symbol,"page.y") == 0) return(image->page.y); break; } case 'Q': case 'q': { if (LocaleCompare(symbol,"quality") == 0) return(image->quality); break; } case 'R': case 'r': { if (LocaleCompare(symbol,"resolution.x") == 0) return(image->resolution.x); if (LocaleCompare(symbol,"resolution.y") == 0) return(image->resolution.y); if (LocaleCompare(symbol,"r") == 0) return(QuantumScale*pixel.red); break; } case 'S': case 's': { if (LocaleCompare(symbol,"saturation") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(saturation); } if (LocaleNCompare(symbol,"skewness",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"standard_deviation",18) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'T': case 't': { if (LocaleCompare(symbol,"t") == 0) return(GetImageIndexInList(fx_info->images)); break; } case 'W': case 'w': { if (LocaleCompare(symbol,"w") == 0) return(image->columns); break; } case 'Y': case 'y': { if (LocaleCompare(symbol,"y") == 0) return(QuantumScale*pixel.blue); break; } case 'Z': case 'z': { if (LocaleCompare(symbol,"z") == 0) return((double)GetImageDepth(image, fx_info->exception)); break; } default: break; } value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol); if (value != 
        (const char *) NULL)
        return(StringToDouble(value,(char **) NULL));
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",symbol);
      return(0.0);
}

/*
  FxOperatorPrecedence() scans 'expression' and returns a pointer to the
  operator at which the expression should be split: the lowest-precedence
  operator at nesting level zero.  Ternary (?:), assignment (=) and bitwise
  complement are treated as right-to-left associative; everything else is
  left-to-right.  Returns NULL when no top-level operator is found (the
  expression is then a single operand or function call).
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;

  FxPrecedence
    precedence,
    target;

  register const char
    *subexpression;

  register int
    c;  /* previously consumed character; 0 before the first iteration */

  size_t
    level;  /* current {...} / [...] nesting depth */

  c=0;
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while (*expression != '\0')
  {
    precedence=UndefinedPrecedence;
    if ((isspace((int) ((unsigned char) *expression)) != 0) ||
        (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      Skip over identifiers and literals whose spelling contains operator
      characters (atan2's '2', E+/E- scientific notation, hex constants) so
      they are not mistaken for operators.
    */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (LocaleNCompare(expression,"acosh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (LocaleNCompare(expression,"asinh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (LocaleNCompare(expression,"atanh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
        if (LocaleNCompare(expression,"atan2",5) == 0)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        if ((isdigit((int) ((unsigned char) c)) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
      }  /* NOTE(review): no break here in the original -- falls through */
      case 'J':
      case 'j':
      {
        if ((LocaleNCompare(expression,"j0",2) == 0) ||
            (LocaleNCompare(expression,"j1",2) == 0))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        /* hex constant: consume the run of hex digits */
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        default:
        {
          /* detect implied multiplication, e.g. "2u" or ")(" */
          if (((c != 0) && ((isdigit((int) ((unsigned char) c)) != 0) ||
               (strchr(")",(int) ((unsigned char) c)) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
                (strchr("(",(int) ((unsigned char) *expression)) !=
                 (char *) NULL)) ||
               ((isdigit((int) ((unsigned char) c)) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) ==
               (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /* a sign following an operator or '(' is unary, not addition */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha(c) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}

/*
  FxEvaluateSubexpression() recursively evaluates 'expression' for the given
  channel at pixel (x,y).  The value of the right-hand operand of the last
  binary operator is stored through 'beta' (some callers, e.g. atan2/pow,
  read it back).  'depth' tracks parenthesis nesting to bound recursion.
*/
static double FxEvaluateSubexpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  const char *expression,size_t *depth,double *beta,ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58

  char
    *q,
    subexpression[MagickPathExtent];

  double
    alpha,
    gamma;

  register const char
    *p;

  *beta=0.0;
  if (exception->severity >= ErrorException)
    return(0.0);
  while (isspace((int) ((unsigned char) *expression)) != 0)
    expression++;
  if (*expression == '\0')
    return(0.0);
  *subexpression='\0';
  p=FxOperatorPrecedence(expression,exception);
  if (p != (const char *) NULL)
    {
      /*
        Split at the lowest-precedence operator; evaluate the left side into
        alpha, then dispatch on the operator for the right side.
      */
      (void) CopyMagickString(subexpression,expression,(size_t)
        (p-expression+1));
      alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth,
        beta,exception);
      switch ((unsigned char) *p)
      {
        case '~':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          *beta=(double) (~(size_t) *beta);
          return(*beta);
        }
        case '!':
        {
          *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
            exception);
          return(*beta == 0.0 ?
1.0 : 0.0); } case '^': { *beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth, beta,exception)); return(*beta); } case '*': case ExponentialNotation: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha*(*beta)); } case '/': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); if (*beta == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"DivideByZero","`%s'",expression); return(0.0); } return(alpha/(*beta)); } case '%': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); *beta=fabs(floor((*beta)+0.5)); if (*beta == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"DivideByZero","`%s'",expression); return(0.0); } return(fmod(alpha,*beta)); } case '+': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha+(*beta)); } case '-': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha-(*beta)); } case LeftShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5)); return(*beta); } case RightShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); *beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5)); return(*beta); } case '<': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha < *beta ? 1.0 : 0.0); } case LessThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha >= *beta ? 
1.0 : 0.0); } case EqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5)); return(*beta); } case '|': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5)); return(*beta); } case LogicalAndOperator: { p++; if (alpha <= 0.0) { *beta=0.0; return(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta, exception); *beta=(gamma > 0.0) ? 1.0 : 0.0; return(*beta); } case LogicalOrOperator: { p++; if (alpha > 0.0) { *beta=1.0; return(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta, exception); *beta=(gamma > 0.0) ? 
1.0 : 0.0; return(*beta); } case '?': { (void) CopyMagickString(subexpression,++p,MagickPathExtent); q=subexpression; p=StringToken(":",&q); if (q == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); return(0.0); } if (fabs(alpha) >= MagickEpsilon) gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta, exception); else gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,depth,beta, exception); return(gamma); } case '=': { char numeric[MagickPathExtent]; q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); return(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); (void) FormatLocaleString(numeric,MagickPathExtent,"%g",*beta); (void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression); (void) AddValueToSplayTree(fx_info->symbols,ConstantString( subexpression),ConstantString(numeric)); return(*beta); } case ',': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha); } case ';': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(*beta); } default: { gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta, exception); return(gamma); } } } if (strchr("(",(int) *expression) != (char *) NULL) { (*depth)++; if (*depth >= FxMaxParenthesisDepth) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "ParenthesisNestedTooDeeply","`%s'",expression); (void) CopyMagickString(subexpression,expression+1,MagickPathExtent); subexpression[strlen(subexpression)-1]='\0'; gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth, beta,exception); (*depth)--; return(gamma); } switch (*expression) { case '+': { 
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta, exception); return(1.0*gamma); } case '-': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta, exception); return(-1.0*gamma); } case '~': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta, exception); return((~(size_t) (gamma+0.5))); } case 'A': case 'a': { if (LocaleNCompare(expression,"abs",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(fabs(alpha)); } #if defined(MAGICKCORE_HAVE_ACOSH) if (LocaleNCompare(expression,"acosh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(acosh(alpha)); } #endif if (LocaleNCompare(expression,"acos",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(acos(alpha)); } #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"airy",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); if (alpha == 0.0) return(1.0); gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha); return(gamma*gamma); } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (LocaleNCompare(expression,"asinh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(asinh(alpha)); } #endif if (LocaleNCompare(expression,"asin",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(asin(alpha)); } if (LocaleNCompare(expression,"alt",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(((ssize_t) alpha) & 0x01 ? 
-1.0 : 1.0); } if (LocaleNCompare(expression,"atan2",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(atan2(alpha,*beta)); } #if defined(MAGICKCORE_HAVE_ATANH) if (LocaleNCompare(expression,"atanh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(atanh(alpha)); } #endif if (LocaleNCompare(expression,"atan",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(atan(alpha)); } if (LocaleCompare(expression,"a") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'B': case 'b': { if (LocaleCompare(expression,"b") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'C': case 'c': { if (LocaleNCompare(expression,"ceil",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(ceil(alpha)); } if (LocaleNCompare(expression,"clamp",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); if (alpha < 0.0) return(0.0); if (alpha > 1.0) return(1.0); return(alpha); } if (LocaleNCompare(expression,"cosh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(cosh(alpha)); } if (LocaleNCompare(expression,"cos",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(cos(alpha)); } if (LocaleCompare(expression,"c") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'D': case 'd': { if (LocaleNCompare(expression,"debug",5) == 0) { const char *type; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); if (fx_info->images->colorspace == CMYKColorspace) switch (channel) { case CyanPixelChannel: type="cyan"; break; case MagentaPixelChannel: type="magenta"; break; case YellowPixelChannel: type="yellow"; break; case 
AlphaPixelChannel: type="opacity"; break; case BlackPixelChannel: type="black"; break; default: type="unknown"; break; } else switch (channel) { case RedPixelChannel: type="red"; break; case GreenPixelChannel: type="green"; break; case BluePixelChannel: type="blue"; break; case AlphaPixelChannel: type="opacity"; break; default: type="unknown"; break; } (void) CopyMagickString(subexpression,expression+6,MagickPathExtent); if (strlen(subexpression) > 1) subexpression[strlen(subexpression)-1]='\0'; if (fx_info->file != (FILE *) NULL) (void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: " "%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type, subexpression,GetMagickPrecision(),alpha); return(0.0); } if (LocaleNCompare(expression,"drc",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return((alpha/(*beta*(alpha-1.0)+1.0))); } break; } case 'E': case 'e': { if (LocaleCompare(expression,"epsilon") == 0) return(MagickEpsilon); #if defined(MAGICKCORE_HAVE_ERF) if (LocaleNCompare(expression,"erf",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(erf(alpha)); } #endif if (LocaleNCompare(expression,"exp",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(exp(alpha)); } if (LocaleCompare(expression,"e") == 0) return(2.7182818284590452354); break; } case 'F': case 'f': { if (LocaleNCompare(expression,"floor",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(floor(alpha)); } break; } case 'G': case 'g': { if (LocaleNCompare(expression,"gauss",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); gamma=exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI); return(gamma); } if (LocaleNCompare(expression,"gcd",3) == 0) { MagickOffsetType gcd; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, 
beta,exception); gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+ 0.5)); return(gcd); } if (LocaleCompare(expression,"g") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'H': case 'h': { if (LocaleCompare(expression,"h") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); if (LocaleCompare(expression,"hue") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); if (LocaleNCompare(expression,"hypot",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(hypot(alpha,*beta)); } break; } case 'K': case 'k': { if (LocaleCompare(expression,"k") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'I': case 'i': { if (LocaleCompare(expression,"intensity") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); if (LocaleNCompare(expression,"int",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(floor(alpha)); } if (LocaleNCompare(expression,"isnan",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(!!IsNaN(alpha)); } if (LocaleCompare(expression,"i") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'J': case 'j': { if (LocaleCompare(expression,"j") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); #if defined(MAGICKCORE_HAVE_J0) if (LocaleNCompare(expression,"j0",2) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth, beta,exception); return(j0(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"j1",2) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth, beta,exception); return(j1(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"jinc",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, 
beta,exception); if (alpha == 0.0) return(1.0); gamma=(2.0*j1((MagickPI*alpha))/(MagickPI*alpha)); return(gamma); } #endif break; } case 'L': case 'l': { if (LocaleNCompare(expression,"ln",2) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth, beta,exception); return(log(alpha)); } if (LocaleNCompare(expression,"logtwo",6) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,depth, beta,exception); return(log10(alpha))/log10(2.0); } if (LocaleNCompare(expression,"log",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(log10(alpha)); } if (LocaleCompare(expression,"lightness") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'M': case 'm': { if (LocaleCompare(expression,"MaxRGB") == 0) return(QuantumRange); if (LocaleNCompare(expression,"maxima",6) == 0) break; if (LocaleNCompare(expression,"max",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(alpha > *beta ? alpha : *beta); } if (LocaleNCompare(expression,"minima",6) == 0) break; if (LocaleNCompare(expression,"min",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(alpha < *beta ? 
alpha : *beta); } if (LocaleNCompare(expression,"mod",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); gamma=alpha-floor((alpha/(*beta)))*(*beta); return(gamma); } if (LocaleCompare(expression,"m") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'N': case 'n': { if (LocaleNCompare(expression,"not",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return((alpha < MagickEpsilon)); } if (LocaleCompare(expression,"n") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'O': case 'o': { if (LocaleCompare(expression,"Opaque") == 0) return(1.0); if (LocaleCompare(expression,"o") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'P': case 'p': { if (LocaleCompare(expression,"phi") == 0) return(MagickPHI); if (LocaleCompare(expression,"pi") == 0) return(MagickPI); if (LocaleNCompare(expression,"pow",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(pow(alpha,*beta)); } if (LocaleCompare(expression,"p") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'Q': case 'q': { if (LocaleCompare(expression,"QuantumRange") == 0) return(QuantumRange); if (LocaleCompare(expression,"QuantumScale") == 0) return(QuantumScale); break; } case 'R': case 'r': { if (LocaleNCompare(expression,"rand",4) == 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxEvaluateSubexpression) #endif alpha=GetPseudoRandomValue(fx_info->random_info); return(alpha); } if (LocaleNCompare(expression,"round",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(floor(alpha+0.5)); } if (LocaleCompare(expression,"r") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'S': case 's': { if (LocaleCompare(expression,"saturation") == 
0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); if (LocaleNCompare(expression,"sign",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(alpha < 0.0 ? -1.0 : 1.0); } if (LocaleNCompare(expression,"sinc",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); if (alpha == 0) return(1.0); gamma=sin((MagickPI*alpha))/(MagickPI*alpha); return(gamma); } if (LocaleNCompare(expression,"sinh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(sinh(alpha)); } if (LocaleNCompare(expression,"sin",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(sin(alpha)); } if (LocaleNCompare(expression,"sqrt",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(sqrt(alpha)); } if (LocaleNCompare(expression,"squish",6) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,depth, beta,exception); return((1.0/(1.0+exp(-alpha)))); } if (LocaleCompare(expression,"s") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'T': case 't': { if (LocaleNCompare(expression,"tanh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(tanh(alpha)); } if (LocaleNCompare(expression,"tan",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(tan(alpha)); } if (LocaleCompare(expression,"Transparent") == 0) return(0.0); if (LocaleNCompare(expression,"trunc",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); if (alpha >= 0.0) return(floor(alpha)); return(ceil(alpha)); } if (LocaleCompare(expression,"t") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'U': case 'u': { if (LocaleCompare(expression,"u") == 0) 
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'V':
    case 'v':
    {
      if (LocaleCompare(expression,"v") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleNCompare(expression,"while",5) == 0)
        {
          /*
            Re-evaluate the argument until it becomes ~0; the result is the
            last right-hand value left in *beta.
          */
          do
          {
            alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
              depth,beta,exception);
          } while (fabs(alpha) >= MagickEpsilon);
          return(*beta);
        }
      if (LocaleCompare(expression,"w") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(expression,"y") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(expression,"z") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    default:
      break;
  }
  /*
    Not a known function or constant: try a numeric literal (with optional SI
    prefix); otherwise fall back to a symbol lookup.
  */
  q=(char *) expression;
  alpha=InterpretSiPrefixValue(expression,&q);
  if (q == expression)
    return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
  return(alpha);
}

/*
  FxEvaluateExpression() evaluates the expression for the gray channel at
  pixel (0,0); used when the expression is pixel-independent.
*/
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  return(status);
}

/*
  FxPreprocessExpression() does a trial evaluation of the expression with
  debug output suppressed (fx_info->file is temporarily cleared) so syntax
  errors surface before the per-pixel loops run.
*/
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *file;

  MagickBooleanType
    status;

  file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  fx_info->file=file;
  return(status);
}

/*
  FxEvaluateChannelExpression() evaluates fx_info->expression for 'channel'
  at pixel (x,y), storing the result through 'alpha'.  Returns MagickFalse
  only when evaluation raised an OptionError.
*/
MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  double *alpha,ExceptionInfo *exception)
{
  double
    beta;

  size_t
    depth;

  depth=0;
  beta=0.0;
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,&depth,
    &beta,exception);
  return(exception->severity == OptionError ?
    MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F x I m a g e                                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FxImage() applies a mathematical expression to the specified image.
%
%  The format of the FxImage method is:
%
%      Image *FxImage(const Image *image,const char *expression,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o expression: A mathematical expression.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  DestroyFxThreadSet() releases one FxInfo per worker thread and the array
  itself; returns NULL for convenient assignment.
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  register ssize_t
    i;

  assert(fx_info != (FxInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (fx_info[i] != (FxInfo *) NULL)
      fx_info[i]=DestroyFxInfo(fx_info[i]);
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}

/*
  AcquireFxThreadSet() builds one FxInfo per worker thread (the OpenMP loop
  in FxImage indexes this array by thread id) and pre-parses the expression
  once per thread.  An expression of the form "@filename" is read from file.
  Returns NULL on allocation or parse failure.
*/
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  FxInfo
    **fx_info;

  double
    alpha;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) ResetMagickMemory(fx_info,0,number_threads*sizeof(*fx_info));
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression,exception);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    /* validate the expression once so per-pixel evaluation can't fail late */
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  /* partial construction (early break) tears down the whole set */
  if (i <
  (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);
  return(fx_info);
}

/*
  FxImage() applies the fx 'expression' to every authentic pixel channel of a
  clone of 'image' and returns the result (NULL on failure).
*/
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define FxImageTag "Fx/Image"

  CacheView
    *fx_view,
    *image_view;

  FxInfo
    **magick_restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    return((Image *) NULL);
  fx_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    Fx image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,fx_image,fx_image->rows,1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    /* each OpenMP thread evaluates with its own pre-parsed FxInfo */
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;
        /* copy-trait or write-masked channels pass through unchanged */
        if (((fx_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) == 0))
          {
            SetPixelChannel(fx_image,channel,p[i],q);
            continue;
          }
        alpha=0.0;
        (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
          exception);
        /* expression yields a normalized [0,1] value */
        q[i]=ClampToQuantum(QuantumRange*alpha);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(fx_image);
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FxImage)
#endif
        proceed=SetImageProgress(image,FxImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  image_view=DestroyCacheView(image_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     I m p l o d e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ImplodeImage() creates a new image that is a copy of an existing
%  one with the image pixels "implode" by the specified percentage.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ImplodeImage method is:
%
%      Image *ImplodeImage(const Image *image,const double amount,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o implode_image: Method ImplodeImage returns a pointer to the image
%      after it is implode.  A null image is returned if there is a memory
%      shortage.
%
%    o image: the image.
%
%    o amount: Define the extent of the implosion.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"

  CacheView
    *image_view,
    *implode_view,
    *interpolate_view;

  Image
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    radius;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  implode_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (implode_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
    {
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  if (implode_image->background_color.alpha != OpaqueAlpha)
    implode_image->alpha_trait=BlendPixelTrait;
  /*
    Compute scaling factor: map the image onto a unit-ish circle whose radius
    is half the shorter dimension.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*image->columns;
  center.y=0.5*image->rows;
  radius=center.x;
  if (image->columns > image->rows)
    scale.y=(double) image->columns/(double) image->rows;
  else
    if (image->columns < image->rows)
      {
        scale.x=(double) image->rows/(double) image->columns;
        radius=center.y;
      }
  /*
    Implode image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  interpolate_view=AcquireVirtualCacheView(image,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,implode_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine if the pixel is within an ellipse.
      */
      if (GetPixelWriteMask(image,p) == 0)
        {
          SetPixelBackgoundColor(implode_image,q);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(implode_image);
          continue;
        }
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        /* outside the implosion circle: copy channels through unchanged */
        for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        {
          PixelChannel channel=GetPixelChannelChannel(image,i);
          PixelTrait traits=GetPixelChannelTraits(image,channel);
          PixelTrait implode_traits=GetPixelChannelTraits(implode_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (implode_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(implode_image,channel,p[i],q);
        }
      else
        {
          double
            factor;

          /*
            Implode the pixel: sample the source at a radially displaced
            position; negative 'amount' explodes instead.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin(MagickPI*sqrt((double) distance)/radius/2),-amount);
          status=InterpolatePixelChannels(image,interpolate_view,implode_image,
            method,(double) (factor*delta.x/scale.x+center.x),(double) (factor*
            delta.y/scale.y+center.y),q,exception);
        }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(implode_image);
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ImplodeImage)
#endif
        proceed=SetImageProgress(image,ImplodeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h I m a g e s                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  The MorphImages() method requires a minimum of two images.  The first
%  image is transformed into the second by a number of intervening images
%  as specified by frames.
%
%  The format of the MorphImage method is:
%
%      Image *MorphImages(const Image *image,const size_t number_frames,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_frames:  Define the number of in-between image to generate.
%      The more in-between frames, the smoother the morph.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *MorphImages(const Image *image,const size_t number_frames, ExceptionInfo *exception) { #define MorphImageTag "Morph/Image" double alpha, beta; Image *morph_image, *morph_images; MagickBooleanType status; MagickOffsetType scene; register const Image *next; register ssize_t n; ssize_t y; /* Clone first frame in sequence. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); morph_images=CloneImage(image,0,0,MagickTrue,exception); if (morph_images == (Image *) NULL) return((Image *) NULL); if (GetNextImageInList(image) == (Image *) NULL) { /* Morph single image. */ for (n=1; n < (ssize_t) number_frames; n++) { morph_image=CloneImage(image,0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n, number_frames); if (proceed == MagickFalse) status=MagickFalse; } } return(GetFirstImageInList(morph_images)); } /* Morph image sequence. 
*/ status=MagickTrue; scene=0; next=image; for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next)) { for (n=0; n < (ssize_t) number_frames; n++) { CacheView *image_view, *morph_view; beta=(double) (n+1.0)/(double) (number_frames+1.0); alpha=1.0-beta; morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta* GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta* GetNextImageInList(next)->rows+0.5),next->filter,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } status=SetImageStorageClass(morph_image,DirectClass,exception); if (status == MagickFalse) { morph_image=DestroyImage(morph_image); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns, morph_images->rows,GetNextImageInList(next)->filter,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } image_view=AcquireVirtualCacheView(morph_image,exception); morph_view=AcquireAuthenticCacheView(morph_images,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(morph_image,morph_image,morph_image->rows,1) #endif for (y=0; y < (ssize_t) morph_images->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1, exception); q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) morph_images->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++) { PixelChannel 
channel=GetPixelChannelChannel(morph_image,i); PixelTrait traits=GetPixelChannelTraits(morph_image,channel); PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel); if ((traits == UndefinedPixelTrait) || (morph_traits == UndefinedPixelTrait)) continue; if (((morph_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(morph_images,p) == 0)) { SetPixelChannel(morph_image,channel,p[i],q); continue; } SetPixelChannel(morph_image,channel,ClampToQuantum(alpha* GetPixelChannel(morph_images,channel,q)+beta*p[i]),q); } p+=GetPixelChannels(morph_image); q+=GetPixelChannels(morph_images); } sync=SyncCacheViewAuthenticPixels(morph_view,exception); if (sync == MagickFalse) status=MagickFalse; } morph_view=DestroyCacheView(morph_view); image_view=DestroyCacheView(image_view); morph_image=DestroyImage(morph_image); } if (n < (ssize_t) number_frames) break; /* Clone last frame in sequence. */ morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MorphImages) #endif proceed=SetImageProgress(image,MorphImageTag,scene, GetImageListLength(image)); if (proceed == MagickFalse) status=MagickFalse; } scene++; } if (GetNextImageInList(next) != (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } return(GetFirstImageInList(morph_images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P l a s m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PlasmaImage() initializes an image with plasma fractal values. 
The image
%  must be initialized with a base color and the random number generator
%  seeded before this method is called.
%
%  The format of the PlasmaImage method is:
%
%      MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
%        size_t attenuate,size_t depth,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o segment: Define the region to apply plasma fractals values.
%
%    o attenuate: Define the plasma attenuation factor.
%
%    o depth: Limit the plasma recursion depth.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Perturb the average of two sample values by uniform noise of width `noise`,
  clamped to the valid Quantum range.
*/
static inline Quantum PlasmaPixel(RandomInfo *random_info,
  const double pixel,const double noise)
{
  Quantum
    plasma;

  plasma=ClampToQuantum(pixel+noise*GetPseudoRandomValue(random_info)-
    noise/2.0);
  /* NOTE(review): ClampToQuantum already bounds the value; these range
     checks appear redundant but are kept byte-identical. */
  if (plasma <= 0)
    return((Quantum) 0);
  if (plasma >= QuantumRange)
    return(QuantumRange);
  return(plasma);
}

/*
  Recursive worker for PlasmaImage(): subdivides `segment` into quadrants
  while depth > 0, then sets the edge-midpoint and center pixels of the
  segment to noisy averages of its corner pixels (diamond-square style).
  `attenuate` grows with recursion depth so noise amplitude shrinks.
*/
static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view,
  CacheView *u_view,CacheView *v_view,RandomInfo *random_info,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  double
    plasma;

  register const Quantum
    *magick_restrict u,
    *magick_restrict v;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  /* Degenerate (zero-area) segment: nothing left to fill. */
  if ((fabs(segment->x2-segment->x1) <= MagickEpsilon) &&
      (fabs(segment->y2-segment->y1) <= MagickEpsilon))
    return(MagickTrue);
  if (depth != 0)
    {
      MagickBooleanType
        status;

      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.
      */
      depth--;
      attenuate++;
      x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
      y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
      /* Top-left quadrant. */
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* Bottom-left quadrant. */
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* Top-right quadrant. */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* Bottom-right quadrant; only its status is propagated. */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      return(status);
    }
  x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
  y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
  /* Segment has collapsed to a single pixel: signal caller to stop. */
  if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
      (fabs(segment->x2-x_mid) < MagickEpsilon) &&
      (fabs(segment->y1-y_mid) < MagickEpsilon) &&
      (fabs(segment->y2-y_mid) < MagickEpsilon))
    return(MagickFalse);
  /*
    Average pixels and apply plasma.
  */
  plasma=(double) QuantumRange/(2.0*attenuate);
  if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
      (fabs(segment->x2-x_mid) > MagickEpsilon))
    {
      /*
        Left pixel: midpoint of the left edge, averaged from the two left
        corners (u = top-left, v = bottom-left).
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),1,1,
        exception);
      v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),1,1,
        exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel=GetPixelChannelChannel(image,i);

        PixelTrait
          traits=GetPixelChannelTraits(image,channel);

        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
      }
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      if (fabs(segment->x1-segment->x2) > MagickEpsilon)
        {
          /*
            Right pixel: midpoint of the right edge.
          */
          x=(ssize_t) ceil(segment->x2-0.5);
          u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel
              channel=GetPixelChannelChannel(image,i);

            PixelTrait
              traits=GetPixelChannelTraits(image,channel);

            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->y1-y_mid) > MagickEpsilon) ||
      (fabs(segment->y2-y_mid) > MagickEpsilon))
    {
      /* NOTE(review): this guard mixes an x test with a y test; the
         analogous left/right guard tests x1 and x2 only.  Possibly a
         long-standing typo (x2 intended) -- kept byte-identical. */
      if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
          (fabs(segment->y2-y_mid) > MagickEpsilon))
        {
          /*
            Bottom pixel: midpoint of the bottom edge, averaged from the two
            bottom corners.
          */
          y=(ssize_t) ceil(segment->y2-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel
              channel=GetPixelChannelChannel(image,i);

            PixelTrait
              traits=GetPixelChannelTraits(image,channel);

            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (fabs(segment->y1-segment->y2) > MagickEpsilon)
        {
          /*
            Top pixel: midpoint of the top edge.
          */
          y=(ssize_t) ceil(segment->y1-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel
              channel=GetPixelChannelChannel(image,i);

            PixelTrait
              traits=GetPixelChannelTraits(image,channel);

            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->x1-segment->x2) > MagickEpsilon) ||
      (fabs(segment->y1-segment->y2) > MagickEpsilon))
    {
      /*
        Middle pixel: center of the segment, averaged from the top-left and
        bottom-right corners.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      y=(ssize_t) ceil(segment->y1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception);
      x=(ssize_t) ceil(segment->x2-0.5);
      y=(ssize_t) ceil(segment->y2-0.5);
      v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel=GetPixelChannelChannel(image,i);

        PixelTrait
          traits=GetPixelChannelTraits(image,channel);

        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
      }
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
    }
  /* Stop recursing once the segment is smaller than 3x3 pixels. */
  if ((fabs(segment->x2-segment->x1) < 3.0) &&
      (fabs(segment->y2-segment->y1) < 3.0))
    return(MagickTrue);
  return(MagickFalse);
}

/*
  Public entry point: acquires the cache views and random state, then
  delegates all pixel work to PlasmaImageProxy().
*/
MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *u_view,
    *v_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  /* NOTE(review): image is dereferenced (and logged) before the NULL
     assert, and the trace event is emitted twice -- kept byte-identical. */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  image_view=AcquireAuthenticCacheView(image,exception);
  u_view=AcquireVirtualCacheView(image,exception);
  v_view=AcquireVirtualCacheView(image,exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
    attenuate,depth,exception);
  random_info=DestroyRandomInfo(random_info);
  v_view=DestroyCacheView(v_view);
  u_view=DestroyCacheView(u_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P o l a r o i d I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PolaroidImage() simulates a Polaroid picture.
%
%  The format of the PolaroidImage method is:
%
%      Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
%        const char *caption,const double angle,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o caption: the Polaroid caption.
%
%    o angle: Apply the effect along this angle.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const char *caption,const double angle,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Border width: 1/25th of the longest image dimension, at least 10px. */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  if (caption != (const char *) NULL)
    {
      char
        geometry[MagickPathExtent],
        *text;

      DrawInfo
        *annotate_info;

      ImageInfo
        *image_info;

      MagickBooleanType
        status;

      ssize_t
        count;

      TypeMetric
        metrics;

      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      image_info=AcquireImageInfo();
      annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
      /* Expand any %-escapes in the caption against the source image. */
      text=InterpretImageProperties(image_info,(Image *) image,caption,
        exception);
      image_info=DestroyImageInfo(image_info);
      (void) CloneString(&annotate_info->text,text);
      /* count = number of wrapped caption lines; metrics sizes the text. */
      count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,&metrics,
        &text,exception);
      status=SetImageExtent(caption_image,image->columns,(size_t) ((count+1)*
        (metrics.ascent-metrics.descent)+0.5),exception);
      if (status == MagickFalse)
        caption_image=DestroyImage(caption_image);
      else
        {
          caption_image->background_color=image->border_color;
          (void) SetImageBackgroundColor(caption_image,exception);
          (void) CloneString(&annotate_info->text,text);
          (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%g",
            metrics.ascent);
          if (annotate_info->gravity == UndefinedGravity)
            (void) CloneString(&annotate_info->geometry,AcquireString(
              geometry));
          (void) AnnotateImage(caption_image,annotate_info,exception);
          height+=caption_image->rows;
        }
      annotate_info=DestroyDrawInfo(annotate_info);
      text=DestroyString(text);
    }
  /*
    Compose the picture: source image centered on a border-colored canvas,
    caption (if any) below it.
  */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image,exception);
  (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
    quantum,exception);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,caption_image,OverCompositeOp,
        MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorCompliance("none",AllCompliance,
    &picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
  /*
    Bend the picture: rotate 90, wave, rotate back -- waving sideways.
    Each transform consumes the previous picture_image.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,method,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /*
    Cast a shadow, flop it, and composite the picture over it.
  */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      /* DestroyImage() returns NULL, so this returns NULL to the caller. */
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
    MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorCompliance("none",AllCompliance,
    &polaroid_image->background_color,exception);
  /*
    Tilt by the requested angle and trim the transparent margin.
  */
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e p i a T o n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSepiaToneImage() applies a special effect to the image, similar to the % effect achieved in a photo darkroom by sepia toning. Threshold ranges from % 0 to QuantumRange and is a measure of the extent of the sepia toning. A % threshold of 80% is a good starting point for a reasonable tone. % % The format of the SepiaToneImage method is: % % Image *SepiaToneImage(const Image *image,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: the tone threshold. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SepiaToneImage(const Image *image,const double threshold, ExceptionInfo *exception) { #define SepiaToneImageTag "SepiaTone/Image" CacheView *image_view, *sepia_view; Image *sepia_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Initialize sepia-toned image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); sepia_image=CloneImage(image,0,0,MagickTrue,exception); if (sepia_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse) { sepia_image=DestroyImage(sepia_image); return((Image *) NULL); } /* Tone each row of the image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); sepia_view=AcquireAuthenticCacheView(sepia_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,sepia_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double intensity, tone; intensity=GetPixelIntensity(image,p); tone=intensity > threshold ? (double) QuantumRange : intensity+ (double) QuantumRange-threshold; SetPixelRed(sepia_image,ClampToQuantum(tone),q); tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange : intensity+(double) QuantumRange-7.0*threshold/6.0; SetPixelGreen(sepia_image,ClampToQuantum(tone),q); tone=intensity < (threshold/6.0) ? 
0 : intensity-threshold/6.0; SetPixelBlue(sepia_image,ClampToQuantum(tone),q); tone=threshold/7.0; if ((double) GetPixelGreen(image,q) < tone) SetPixelGreen(sepia_image,ClampToQuantum(tone),q); if ((double) GetPixelBlue(image,q) < tone) SetPixelBlue(sepia_image,ClampToQuantum(tone),q); SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(sepia_image); } if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SepiaToneImage) #endif proceed=SetImageProgress(image,SepiaToneImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sepia_view=DestroyCacheView(sepia_view); image_view=DestroyCacheView(image_view); (void) NormalizeImage(sepia_image,exception); (void) ContrastImage(sepia_image,MagickTrue,exception); if (status == MagickFalse) sepia_image=DestroyImage(sepia_image); return(sepia_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a d o w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShadowImage() simulates a shadow from the specified image and returns it. % % The format of the ShadowImage method is: % % Image *ShadowImage(const Image *image,const double alpha, % const double sigma,const ssize_t x_offset,const ssize_t y_offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o alpha: percentage transparency. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o x_offset: the shadow x-offset. % % o y_offset: the shadow y-offset. % % o exception: return any errors or warnings in this structure. 
% */
MagickExport Image *ShadowImage(const Image *image,const double alpha,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag  "Shadow/Image"

  CacheView
    *image_view;

  ChannelType
    channel_mask;

  Image
    *border_image,
    *clone_image,
    *shadow_image;

  MagickBooleanType
    status;

  PixelInfo
    background_color;

  RectangleInfo
    border_info;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(clone_image,sRGBColorspace,exception);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
    exception);
  /* Pad by ~2*sigma on every side so the blurred shadow has room to spread. */
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
    exception);
  clone_image->alpha_trait=BlendPixelTrait;
  border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
  /*
    Shadow image: flatten every pixel to the background color, scaling its
    alpha by the requested transparency percentage.
  */
  status=MagickTrue;
  background_color=border_image->background_color;
  background_color.alpha_trait=BlendPixelTrait;
  image_view=AcquireAuthenticCacheView(border_image,exception);
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      if (border_image->alpha_trait != UndefinedPixelTrait)
        background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
      SetPixelViaPixelInfo(border_image,&background_color,q);
      q+=GetPixelChannels(border_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      border_image=DestroyImage(border_image);
      return((Image *) NULL);
    }
  /* Blur only the alpha channel to soften the shadow edge. */
  channel_mask=SetImageChannelMask(border_image,AlphaChannel);
  shadow_image=BlurImage(border_image,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetPixelChannelMask(shadow_image,channel_mask);
  /*
    Encode the requested offset into the page geometry, compensating for
    the border padding added above.
  */
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S k e t c h I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SketchImage() simulates a pencil sketch.
We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). For % reasonable results, radius should be larger than sigma. Use a radius of 0 % and SketchImage() selects a suitable radius for you. Angle gives the angle % of the sketch. % % The format of the SketchImage method is: % % Image *SketchImage(const Image *image,const double radius, % const double sigma,const double angle,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the % center pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o angle: apply the effect along this angle. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SketchImage(const Image *image,const double radius, const double sigma,const double angle,ExceptionInfo *exception) { CacheView *random_view; Image *blend_image, *blur_image, *dodge_image, *random_image, *sketch_image; MagickBooleanType status; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Sketch image. 
*/ random_image=CloneImage(image,image->columns << 1,image->rows << 1, MagickTrue,exception); if (random_image == (Image *) NULL) return((Image *) NULL); status=MagickTrue; random_info=AcquireRandomInfoThreadSet(); random_view=AcquireAuthenticCacheView(random_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(random_image,random_image,random_image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) random_image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) random_image->columns; x++) { double value; register ssize_t i; if (GetPixelWriteMask(random_image,q) == 0) { q+=GetPixelChannels(random_image); continue; } value=GetPseudoRandomValue(random_info[id]); for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=ClampToQuantum(QuantumRange*value); } q+=GetPixelChannels(random_image); } if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse) status=MagickFalse; } random_view=DestroyCacheView(random_view); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) { random_image=DestroyImage(random_image); return(random_image); } blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception); random_image=DestroyImage(random_image); if (blur_image == (Image *) NULL) return((Image *) NULL); dodge_image=EdgeImage(blur_image,radius,exception); blur_image=DestroyImage(blur_image); if (dodge_image == (Image *) NULL) return((Image *) NULL); (void) NormalizeImage(dodge_image,exception); 
(void) NegateImage(dodge_image,MagickFalse,exception); (void) TransformImage(&dodge_image,(char *) NULL,"50%",exception); sketch_image=CloneImage(image,0,0,MagickTrue,exception); if (sketch_image == (Image *) NULL) { dodge_image=DestroyImage(dodge_image); return((Image *) NULL); } (void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp, MagickTrue,0,0,exception); dodge_image=DestroyImage(dodge_image); blend_image=CloneImage(image,0,0,MagickTrue,exception); if (blend_image == (Image *) NULL) { sketch_image=DestroyImage(sketch_image); return((Image *) NULL); } if (blend_image->alpha_trait != BlendPixelTrait) (void) SetImageAlpha(blend_image,TransparentAlpha,exception); (void) SetImageArtifact(blend_image,"compose:args","20x80"); (void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue, 0,0,exception); blend_image=DestroyImage(blend_image); return(sketch_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S o l a r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SolarizeImage() applies a special effect to the image, similar to the effect % achieved in a photo darkroom by selectively exposing areas of photo % sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a % measure of the extent of the solarization. % % The format of the SolarizeImage method is: % % MagickBooleanType SolarizeImage(Image *image,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: Define the extent of the solarization. % % o exception: return any errors or warnings in this structure. 
%
*/
/*
  SolarizeImage(): in-place darkroom "solarization" (Sabattier) effect —
  every channel value strictly greater than 'threshold' is inverted
  (value -> QuantumRange-value).  Returns MagickTrue on success; errors are
  reported through 'exception'.
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag  "Solarize/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* promote gray images to sRGB so the effect applies to full RGB */
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Solarize colormap: invert map entries above the threshold; the
        authentic-pixel loop below then rewrites the synced pixel planes.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((double) image->colormap[i].red > threshold)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((double) image->colormap[i].green > threshold)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((double) image->colormap[i].blue > threshold)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
    }
  /*
    Solarize image: one row per loop iteration, parallelized under OpenMP.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    /* a prior row failed: skip remaining rows (cannot 'break' in OpenMP) */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* pixels excluded by the write mask are left untouched */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        /* only channels flagged for update are solarized */
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if ((double) q[i] > threshold)
          q[i]=QuantumRange-q[i];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* serialize progress updates across OpenMP threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SolarizeImage)
#endif
        proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S t e g a n o I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SteganoImage() hides a digital watermark within the image.  Recover
%  the hidden watermark later to prove the authenticity of an image.
%  Offset defines the start position within the image to hide the watermark.
% % The format of the SteganoImage method is: % % Image *SteganoImage(const Image *image,Image *watermark, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o watermark: the watermark image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SteganoImage(const Image *image,const Image *watermark, ExceptionInfo *exception) { #define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0) #define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \ | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i))) #define SteganoImageTag "Stegano/Image" CacheView *stegano_view, *watermark_view; Image *stegano_image; int c; MagickBooleanType status; PixelInfo pixel; register Quantum *q; register ssize_t x; size_t depth, one; ssize_t i, j, k, y; /* Initialize steganographic image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(watermark != (const Image *) NULL); assert(watermark->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); one=1UL; stegano_image=CloneImage(image,0,0,MagickTrue,exception); if (stegano_image == (Image *) NULL) return((Image *) NULL); stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH; if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse) { stegano_image=DestroyImage(stegano_image); return((Image *) NULL); } /* Hide watermark in low-order bits of image. 
*/ c=0; i=0; j=0; depth=stegano_image->depth; k=stegano_image->offset; status=MagickTrue; watermark_view=AcquireVirtualCacheView(watermark,exception); stegano_view=AcquireAuthenticCacheView(stegano_image,exception); for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--) { for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++) { for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++) { ssize_t offset; (void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel, exception); offset=k/(ssize_t) stegano_image->columns; if (offset >= (ssize_t) stegano_image->rows) break; q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t) stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1, exception); if (q == (Quantum *) NULL) break; switch (c) { case 0: { SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j, GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q); break; } case 1: { SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j, GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q); break; } case 2: { SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j, GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q); break; } } if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse) break; c++; if (c == 3) c=0; k++; if (k == (ssize_t) (stegano_image->columns*stegano_image->columns)) k=0; if (k == stegano_image->offset) j++; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType) (depth-i),depth); if (proceed == MagickFalse) status=MagickFalse; } } stegano_view=DestroyCacheView(stegano_view); watermark_view=DestroyCacheView(watermark_view); if (status == MagickFalse) stegano_image=DestroyImage(stegano_image); return(stegano_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t e r e o A n a g l y 
p h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % StereoAnaglyphImage() combines two images and produces a single image that % is the composite of a left and right image of a stereo pair. Special % red-green stereo glasses are required to view this effect. % % The format of the StereoAnaglyphImage method is: % % Image *StereoImage(const Image *left_image,const Image *right_image, % ExceptionInfo *exception) % Image *StereoAnaglyphImage(const Image *left_image, % const Image *right_image,const ssize_t x_offset,const ssize_t y_offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o left_image: the left image. % % o right_image: the right image. % % o exception: return any errors or warnings in this structure. % % o x_offset: amount, in pixels, by which the left image is offset to the % right of the right image. % % o y_offset: amount, in pixels, by which the left image is offset to the % bottom of the right image. 
%
%
*/
/*
  StereoImage(): convenience wrapper — anaglyph with zero x/y offset.
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  return(StereoAnaglyphImage(left_image,right_image,0,0,exception));
}

/*
  StereoAnaglyphImage(): build a red/cyan anaglyph — red channel from the
  (offset) left image, green+blue from the right image.  Both inputs must
  have identical dimensions.  Returns a new image or NULL on error.
*/
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag  "Stereo/Image"

  const Image
    *image;

  Image
    *stereo_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickCoreSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(right_image != (const Image *) NULL);
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse)
    {
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(stereo_image,sRGBColorspace,exception);
  /*
    Copy left image to red channel and right image to blue channel.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    register Quantum
      *magick_restrict r;

    /* left image is sampled shifted by (-x_offset,-y_offset) */
    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      /*
        NOTE(review): the accessors below pass 'image' (the left image)
        for channel mapping while writing through 'r' into stereo_image;
        this works because stereo_image is a clone of left_image and shares
        its channel map — confirm before touching.
      */
      SetPixelRed(image,GetPixelRed(left_image,p),r);
      SetPixelGreen(image,GetPixelGreen(right_image,q),r);
      SetPixelBlue(image,GetPixelBlue(right_image,q),r);
      if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0)
        SetPixelAlpha(image,(GetPixelAlpha(left_image,p)+
          GetPixelAlpha(right_image,q))/2,r);
      p+=GetPixelChannels(left_image);
      q+=GetPixelChannels(right_image);
      r+=GetPixelChannels(stereo_image);
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    stereo_image=DestroyImage(stereo_image);
  return(stereo_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S w i r l I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SwirlImage() swirls the pixels about the center of the image, where
%  degrees indicates the sweep of the arc through which each pixel is moved.
%  You get a more dramatic effect as the degrees move from 1 to 360.
%
%  The format of the SwirlImage method is:
%
%      Image *SwirlImage(const Image *image,double degrees,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o degrees: Define the tightness of the swirling effect.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag  "Swirl/Image"

  CacheView
    *image_view,
    *interpolate_view,
    *swirl_view;

  Image
    *swirl_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    radius;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize swirl image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  swirl_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (swirl_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
    {
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  if (swirl_image->background_color.alpha != OpaqueAlpha)
    swirl_image->alpha_trait=BlendPixelTrait;
  /*
    Compute scaling factor: the swirl operates inside the largest inscribed
    circle; the non-square axis is stretched so the region is circular.
  */
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  radius=MagickMax(center.x,center.y);
  scale.x=1.0;
  scale.y=1.0;
  if (image->columns > image->rows)
    scale.y=(double) image->columns/(double) image->rows;
  else
    if (image->columns < image->rows)
      scale.x=(double) image->rows/(double) image->columns;
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  interpolate_view=AcquireVirtualCacheView(image,exception);
  swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,swirl_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      if (GetPixelWriteMask(image,p) == 0)
        {
          /* masked pixels get the background color in the output */
          SetPixelBackgoundColor(swirl_image,q);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(swirl_image);
          continue;
        }
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        {
          /* outside the swirl circle: copy the source pixel unchanged */
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel=GetPixelChannelChannel(image,i);
            PixelTrait traits=GetPixelChannelTraits(image,channel);
            PixelTrait swirl_traits=GetPixelChannelTraits(swirl_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (swirl_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(swirl_image,channel,p[i],q);
          }
        }
      else
        {
          double
            cosine,
            factor,
            sine;

          /*
            Swirl the pixel: rotation angle falls off quadratically with
            distance from the center, then sample the source at the
            rotated coordinate via the requested interpolation method.
          */
          factor=1.0-sqrt((double) distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          status=InterpolatePixelChannels(image,interpolate_view,swirl_image,
            method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),(double)
            ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,exception);
        }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(swirl_image);
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SwirlImage)
#endif
        proceed=SetImageProgress(image,SwirlImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T i n t I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TintImage() applies a color vector to each pixel in the image.  The length
%  of the vector is 0 for black and white and at its maximum for the midtones.
%  The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
%  The format of the TintImage method is:
%
%      Image *TintImage(const Image *image,const char *blend,
%        const PixelInfo *tint,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o blend: A color value used for tinting.
%
%    o tint: A color value used for tinting.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *blend,
  const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag  "Tint/Image"

  CacheView
    *image_view,
    *tint_view;

  double
    intensity;

  GeometryInfo
    geometry_info;

  Image
    *tint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    color_vector;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  tint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
    {
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  /* a non-gray tint on a gray image forces the clone to sRGB */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelInfoGray(tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace,exception);
  /* no blend geometry: return the untinted clone as-is */
  if (blend == (const char *) NULL)
    return(tint_image);
  /*
    Determine RGB values of the color: 'blend' is parsed as a geometry
    (rho[/sigma[/xi[/psi]]]) giving per-channel blend percentages.
  */
  GetPixelInfo(image,&color_vector);
  flags=ParseGeometry(blend,&geometry_info);
  color_vector.red=geometry_info.rho;
  color_vector.green=geometry_info.rho;
  color_vector.blue=geometry_info.rho;
  color_vector.alpha=(MagickRealType) OpaqueAlpha;
  if ((flags & SigmaValue) != 0)
    color_vector.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    color_vector.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    color_vector.alpha=geometry_info.psi;
  if (image->colorspace == CMYKColorspace)
    {
      color_vector.black=geometry_info.rho;
      if ((flags & PsiValue) != 0)
        color_vector.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        color_vector.alpha=geometry_info.chi;
    }
  /*
    NOTE(review): intensity is computed with a NULL image argument —
    presumably GetPixelInfoIntensity falls back to a default intensity
    method in that case; confirm against pixel-accessor implementation.
  */
  intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
  color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
  color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
  color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
  color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
  color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
  /*
    Tint image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      double
        weight;

      register ssize_t
        i;

      /* copy-trait or write-masked channels are copied verbatim first */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait tint_traits=GetPixelChannelTraits(tint_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (tint_traits == UndefinedPixelTrait))
          continue;
        if (((tint_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) == 0))
          {
            SetPixelChannel(tint_image,channel,p[i],q);
            continue;
          }
      }
      /*
        NOTE(review): SetPixelViaPixelInfo below writes the full pixel and
        may overwrite channels just copied above — verify this matches the
        intended copy-trait semantics before refactoring.
      */
      GetPixelInfo(image,&pixel);
      weight=QuantumScale*GetPixelRed(image,p)-0.5;
      pixel.red=(double) GetPixelRed(image,p)+color_vector.red*(1.0-(4.0*
        (weight*weight)));
      weight=QuantumScale*GetPixelGreen(image,p)-0.5;
      pixel.green=(double) GetPixelGreen(image,p)+color_vector.green*(1.0-(4.0*
        (weight*weight)));
      weight=QuantumScale*GetPixelBlue(image,p)-0.5;
      pixel.blue=(double) GetPixelBlue(image,p)+color_vector.blue*(1.0-(4.0*
        (weight*weight)));
      weight=QuantumScale*GetPixelBlack(image,p)-0.5;
      pixel.black=(double) GetPixelBlack(image,p)+color_vector.black*(1.0-(4.0*
        (weight*weight)));
      SetPixelViaPixelInfo(tint_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(tint_image);
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TintImage)
#endif
        proceed=SetImageProgress(image,TintImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     V i g n e t t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  VignetteImage() softens the edges of the image in vignette style.
%
%  The format of the VignetteImage method is:
%
%      Image *VignetteImage(const Image *image,const double radius,
%        const double sigma,const ssize_t x,const ssize_t y,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o x, y: Define the x and y ellipse offset.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  VignetteImage(): soften the image edges by compositing a blurred white
  ellipse as an alpha mask, then flattening.  Returns a new image or NULL.
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    ellipse[MagickPathExtent];

  DrawInfo
    *draw_info;

  Image
    *canvas_image,
    *blur_image,
    *oval_image,
    *vignette_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  canvas_image->alpha_trait=BlendPixelTrait;
  /* draw a white ellipse on a black canvas to serve as the vignette mask */
  oval_image=CloneImage(canvas_image,canvas_image->columns,canvas_image->rows,
    MagickTrue,exception);
  if (oval_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  (void) QueryColorCompliance("#000000",AllCompliance,
    &oval_image->background_color,exception);
  (void) SetImageBackgroundColor(oval_image,exception);
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke,
    exception);
  /* ellipse is centered; x/y shrink the semi-axes from the image边 center */
  (void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g,"
    "0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x,
    image->rows/2.0-y);
  draw_info->primitive=AcquireString(ellipse);
  (void) DrawImage(oval_image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  /* soften the mask edge, then apply it as the canvas alpha */
  blur_image=BlurImage(oval_image,radius,sigma,exception);
  oval_image=DestroyImage(oval_image);
  if (blur_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  blur_image->alpha_trait=UndefinedPixelTrait;
  (void) CompositeImage(canvas_image,blur_image,IntensityCompositeOp,MagickTrue,
    0,0,exception);
  blur_image=DestroyImage(blur_image);
  vignette_image=MergeImageLayers(canvas_image,FlattenLayer,exception);
  canvas_image=DestroyImage(canvas_image);
  if (vignette_image != (Image *) NULL)
    (void) TransformImageColorspace(vignette_image,image->colorspace,exception);
  return(vignette_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     W a v e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WaveImage() creates a "ripple" effect in the image by shifting the pixels
%  vertically along a sine wave whose amplitude and wavelength is specified
%  by the given parameters.
%
%  The format of the WaveImage method is:
%
%      Image *WaveImage(const Image *image,const double amplitude,
%        const double wave_length,const PixelInterpolateMethod method,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o amplitude, wave_length:  Define the amplitude and wave length of the
%      sine wave.
%
%    o interpolate: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define WaveImageTag  "Wave/Image"

  CacheView
    *image_view,
    *wave_view;

  Image
    *wave_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    *sine_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize wave image attributes: the output is taller by twice the
    amplitude so the displaced pixels fit.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  wave_image=CloneImage(image,image->columns,(size_t) (image->rows+2.0*
    fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse)
    {
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  if (wave_image->background_color.alpha != OpaqueAlpha)
    wave_image->alpha_trait=BlendPixelTrait;
  /*
    Allocate sine map: one vertical displacement per output column.
  */
  sine_map=(double *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (double *) NULL)
    {
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/
      wave_length));
  /*
    Wave image: sample the source at (x, y-sine_map[x]) with the requested
    interpolation; out-of-range samples resolve to the background color.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  (void) SetCacheViewVirtualPixelMethod(image_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      status=InterpolatePixelChannels(image,image_view,wave_image,method,
        (double) x,(double) (y-sine_map[x]),q,exception);
      q+=GetPixelChannels(wave_image);
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WaveImage)
#endif
        proceed=SetImageProgress(image,WaveImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  image_view=DestroyCacheView(image_view);
  sine_map=(double *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     W a v e l e t   D e n o i s e   I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WaveletDenoiseImage() removes noise from the image using a wavelet
%  transform.  The wavelet transform is a fast hierarchical scheme for
%  processing an image using a set of consecutive lowpass and high_pass
%  filters, followed by a decimation.
This results in a decomposition into different
%  scales which can be regarded as different "frequency bands", determined by
%  the mother wavelet.  Adapted from dcraw.c by David Coffin.
%
%  The format of the WaveletDenoiseImage method is:
%
%      Image *WaveletDenoiseImage(const Image *image,const double threshold,
%        const double softness,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: set the threshold for smoothing.
%
%    o softness: attenuate the smoothing threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  HatTransform(): 1-D "a trous" hat-wavelet low-pass of a strided row or
  column.  Writes 'extent' filtered samples into 'kernel'; the first/last
  'scale' samples use mirrored boundary handling (note *p+(*p) is the
  2*center term of the 1/4*(2c + left + right) kernel).
*/
static inline void HatTransform(const float *magick_restrict pixels,
  const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
  const float
    *magick_restrict p,
    *magick_restrict q,
    *magick_restrict r;

  register ssize_t
    i;

  p=pixels;
  q=pixels+scale*stride;
  r=pixels+scale*stride;
  /* leading edge: mirror the left boundary */
  for (i=0; i < (ssize_t) scale; i++)
  {
    kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
    p+=stride;
    q-=stride;
    r+=stride;
  }
  /* interior: full 3-tap kernel at offset 'scale' */
  for ( ; i < (ssize_t) (extent-scale); i++)
  {
    kernel[i]=0.25f*(2.0f*(*p)+*(p-scale*stride)+*(p+scale*stride));
    p+=stride;
  }
  /* trailing edge: mirror the right boundary */
  q=p-scale*stride;
  r=pixels+stride*(extent-2);
  for ( ; i < (ssize_t) extent; i++)
  {
    kernel[i]=0.25f*(*p+(*p)+(*q)+(*r));
    p+=stride;
    q+=stride;
    r-=stride;
  }
}

/*
  WaveletDenoiseImage(): per-channel 5-level "a trous" wavelet decomposition;
  detail coefficients are soft-thresholded against threshold*noise_levels[]
  and the channel is rebuilt.  Returns a denoised clone or NULL on error.
*/
MagickExport Image *WaveletDenoiseImage(const Image *image,
  const double threshold,const double softness,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *noise_view;

  float
    *kernel,
    *pixels;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickSizeType
    number_pixels;

  MemoryInfo
    *pixels_info;

  ssize_t
    channel;

  /* empirically-determined per-level noise magnitudes (from dcraw) */
  static const float
    noise_levels[]= { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f,
      0.0080f, 0.0044f };

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* try the OpenCL-accelerated path first */
  noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /* 3 float planes: source channel + two ping-pong low-pass buffers */
  pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
    sizeof(*pixels));
  /* one scratch row/column per OpenMP thread */
  kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns),
    GetOpenMPMaximumThreads()*sizeof(*kernel));
  if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
    {
      if (kernel != (float *) NULL)
        kernel=(float *) RelinquishMagickMemory(kernel);
      if (pixels_info != (MemoryInfo *) NULL)
        pixels_info=RelinquishVirtualMemory(pixels_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(float *) GetVirtualMemoryBlob(pixels_info);
  status=MagickTrue;
  number_pixels=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
  for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++)
  {
    register ssize_t
      i;

    size_t
      high_pass,
      low_pass;

    ssize_t
      level,
      y;

    PixelChannel
      pixel_channel;

    PixelTrait
      traits;

    if (status == MagickFalse)
      continue;
    traits=GetPixelChannelTraits(image,(PixelChannel) channel);
    if (traits == UndefinedPixelTrait)
      continue;
    /* only the RGB channels are denoised; alpha/meta pass through */
    pixel_channel=GetPixelChannelChannel(image,channel);
    if ((pixel_channel != RedPixelChannel) &&
        (pixel_channel != GreenPixelChannel) &&
        (pixel_channel != BluePixelChannel))
      continue;
    /*
      Copy channel from image to wavelet pixel array.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      ssize_t
        x;

      p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[i++]=(float) p[channel];
        p+=GetPixelChannels(image);
      }
    }
    /*
      Low pass filter outputs are called approximation kernel & high pass
      filters are referred to as detail kernel.  The detail kernel
      have high values in the noisy parts of the signal.
    */
    high_pass=0;
    for (level=0; level < 5; level++)
    {
      double
        magnitude;

      ssize_t
        x,
        y;

      /* alternate between plane 1 and plane 2 for the new low-pass */
      low_pass=(size_t) (number_pixels*((level & 0x01)+1));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        register float
          *magick_restrict p,
          *magick_restrict q;

        register ssize_t
          x;

        /* horizontal hat transform, row by row */
        p=kernel+id*image->columns;
        q=pixels+y*image->columns;
        HatTransform(q+high_pass,1,image->columns,(size_t) (1 << level),p);
        q+=low_pass;
        for (x=0; x < (ssize_t) image->columns; x++)
          *q++=(*p++);
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_threads(image,image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();

        register float
          *magick_restrict p,
          *magick_restrict q;

        register ssize_t
          y;

        /* vertical hat transform, column by column (stride = columns) */
        p=kernel+id*image->rows;
        q=pixels+x+low_pass;
        HatTransform(q,image->columns,image->rows,(size_t) (1 << level),p);
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          *q=(*p++);
          q+=image->columns;
        }
      }
      /*
        To threshold, each coefficient is compared to a threshold value and
        attenuated / shrunk by some factor.
      */
      magnitude=threshold*noise_levels[level];
      for (i=0; i < (ssize_t) number_pixels; ++i)
      {
        /* detail = previous low-pass minus new low-pass; soft-threshold it */
        pixels[high_pass+i]-=pixels[low_pass+i];
        if (pixels[high_pass+i] < -magnitude)
          pixels[high_pass+i]+=magnitude-softness*magnitude;
        else
          if (pixels[high_pass+i] > magnitude)
            pixels[high_pass+i]-=magnitude-softness*magnitude;
          else
            pixels[high_pass+i]*=softness;
        /* accumulate thresholded detail back into the reconstruction plane */
        if (high_pass != 0)
          pixels[i]+=pixels[high_pass+i];
      }
      high_pass=low_pass;
    }
    /*
      Reconstruct image from the thresholded wavelet kernel.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        offset;

      q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
        exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      offset=GetPixelChannelOffset(noise_image,pixel_channel);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        MagickRealType
          pixel;

        /* result = accumulated details + final approximation */
        pixel=(MagickRealType) pixels[i]+pixels[low_pass+i];
        q[offset]=ClampToQuantum(pixel);
        i++;
        q+=GetPixelChannels(noise_image);
      }
      sync=SyncCacheViewAuthenticPixels(noise_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          NOTE(review): progress is reported under AddNoiseImageTag rather
          than a dedicated wavelet-denoise tag — looks like a copy-paste
          reuse; confirm against the progress-monitor consumers.
        */
        proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
          channel,GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  kernel=(float *) RelinquishMagickMemory(kernel);
  pixels_info=RelinquishVirtualMemory(pixels_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
bodysystemcpu_impl.h
/*
 * Copyright 1993-2010 NVIDIA Corporation.  All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 *
 */

#include "bodysystemcpu.h"

#include <assert.h>
#include <memory.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <cutil_inline.h>
#include <algorithm>

#ifdef OPENMP
#include <omp.h>
#endif

// Host-side reference implementation of the all-pairs n-body simulation.
// Layout: positions and velocities are packed 4-vectors (x,y,z,w) where
// pos.w holds the body's inverse mass; forces are packed 3-vectors.

template <typename T>
BodySystemCPU<T>::BodySystemCPU(int numBodies)
    : m_numBodies(numBodies),
      m_bInitialized(false),
      m_force(0),
      m_softeningSquared(.00125f),
      m_damping(0.995f)
{
    m_pos = 0;
    m_vel = 0;

    _initialize(numBodies);
}

template <typename T>
BodySystemCPU<T>::~BodySystemCPU()
{
    _finalize();
    m_numBodies = 0;
}

// Allocate and zero the position (4N), velocity (4N) and force (3N) arrays.
template <typename T>
void BodySystemCPU<T>::_initialize(int numBodies)
{
    assert(!m_bInitialized);

    m_numBodies = numBodies;

    m_pos   = new T[m_numBodies*4];
    m_vel   = new T[m_numBodies*4];
    m_force = new T[m_numBodies*3];

    memset(m_pos,   0, m_numBodies*4*sizeof(T));
    memset(m_vel,   0, m_numBodies*4*sizeof(T));
    memset(m_force, 0, m_numBodies*3*sizeof(T));

    m_bInitialized = true;
}

// Release all body arrays.  Resets the initialized flag so a subsequent
// _initialize() does not trip its assertion.
template <typename T>
void BodySystemCPU<T>::_finalize()
{
    assert(m_bInitialized);

    delete [] m_pos;
    delete [] m_vel;
    delete [] m_force;

    m_bInitialized = false;
}

// Advance the whole system by one time step of length deltaTime.
template <typename T>
void BodySystemCPU<T>::update(T deltaTime)
{
    assert(m_bInitialized);

    _integrateNBodySystem(deltaTime);

    //std::swap(m_currentRead, m_currentWrite);
}

// Return a pointer to the requested body array (positions by default).
template <typename T>
T* BodySystemCPU<T>::getArray(BodyArray array)
{
    assert(m_bInitialized);

    T* data = 0;
    switch (array)
    {
        default:
        case BODYSYSTEM_POSITION:
            data = m_pos;
            break;
        case BODYSYSTEM_VELOCITY:
            data = m_vel;
            break;
    }

    return data;
}

// Copy 4N values from 'data' into the requested body array.
template <typename T>
void BodySystemCPU<T>::setArray(BodyArray array, const T* data)
{
    assert(m_bInitialized);

    T* target = 0;
    switch (array)
    {
        default:
        case BODYSYSTEM_POSITION:
            target = m_pos;
            break;
        case BODYSYSTEM_VELOCITY:
            target = m_vel;
            break;
    }

    memcpy(target, data, m_numBodies*4*sizeof(T));
}

// sqrt dispatch so float uses sqrtf and other types use sqrt.
template<typename T>
T sqrt_T(T x)
{
    return sqrt(x);
}

template<>
float sqrt_T<float>(float x)
{
    return sqrtf(x);
}

// Accumulate into accel[] the gravitational acceleration exerted on body 0
// (posMass0) by body 1 (posMass1).  posMass*[3] is the source body's mass
// term; softeningSquared avoids the singularity at zero separation.
template <typename T>
void bodyBodyInteraction(T accel[3], T posMass0[4], T posMass1[4], T softeningSquared)
{
    T r[3];

    // r_01  [3 FLOPS]
    r[0] = posMass1[0] - posMass0[0];
    r[1] = posMass1[1] - posMass0[1];
    r[2] = posMass1[2] - posMass0[2];

    // d^2 + e^2 [6 FLOPS]
    T distSqr = r[0] * r[0] + r[1] * r[1] + r[2] * r[2];
    distSqr += softeningSquared;

    // invDistCube =1/distSqr^(3/2)  [4 FLOPS (2 mul, 1 sqrt, 1 inv)]
    T invDist = (T)1.0 / sqrt((double)distSqr);
    T invDistCube =  invDist * invDist * invDist;

    // s = m_j * invDistCube [1 FLOP]
    T s = posMass1[3] * invDistCube;

    // (m_1 * r_01) / (d^2 + e^2)^(3/2)  [6 FLOPS]
    accel[0] += r[0] * s;
    accel[1] += r[1] * s;
    accel[2] += r[2] * s;
}

// Compute the net gravitational force on every body (all-pairs, O(n^2)).
template <typename T>
void BodySystemCPU<T>::_computeNBodyGravitation()
{
#ifdef OPENMP
    #pragma omp parallel for
#endif
    for (int i = 0; i < m_numBodies; ++i)
    {
        int indexForce = 3*i;

        T acc[3] = {0, 0, 0};

        // Positions are packed 4-vectors, so body j lives at m_pos[4*j].
        // BUG FIX: the previous hand-unrolled loop indexed m_pos[i] and
        // m_pos[j] (missing the *4 stride) and advanced j by 4 after each
        // of its four calls, which both mis-addressed every body and
        // skipped three out of every four interactions.  A simple loop is
        // correct and also handles m_numBodies not divisible by 4.
        for (int j = 0; j < m_numBodies; ++j)
        {
            bodyBodyInteraction<T>(acc, &m_pos[4*i], &m_pos[4*j], m_softeningSquared);
        }

        m_force[indexForce  ] = acc[0];
        m_force[indexForce+1] = acc[1];
        m_force[indexForce+2] = acc[2];
    }
}

// Semi-implicit Euler step: recompute forces, then update velocities
// (with damping) and positions for every body.
template <typename T>
void BodySystemCPU<T>::_integrateNBodySystem(T deltaTime)
{
    _computeNBodyGravitation();

#ifdef OPENMP
    #pragma omp parallel for
#endif
    for (int i = 0; i < m_numBodies; ++i)
    {
        int index = 4*i;
        int indexForce = 3*i;

        T pos[3], vel[3], force[3];
        pos[0] = m_pos[index+0];
        pos[1] = m_pos[index+1];
        pos[2] = m_pos[index+2];
        T invMass = m_pos[index+3];  // w component stores 1/mass

        vel[0] = m_vel[index+0];
        vel[1] = m_vel[index+1];
        vel[2] = m_vel[index+2];

        force[0] = m_force[indexForce+0];
        force[1] = m_force[indexForce+1];
        force[2] = m_force[indexForce+2];

        // acceleration = force / mass;
        // new velocity = old velocity + acceleration * deltaTime
        vel[0] += (force[0] * invMass) * deltaTime;
        vel[1] += (force[1] * invMass) * deltaTime;
        vel[2] += (force[2] * invMass) * deltaTime;

        vel[0] *= m_damping;
        vel[1] *= m_damping;
        vel[2] *= m_damping;

        // new position = old position + velocity * deltaTime
        pos[0] += vel[0] * deltaTime;
        pos[1] += vel[1] * deltaTime;
        pos[2] += vel[2] * deltaTime;

        m_pos[index+0] = pos[0];
        m_pos[index+1] = pos[1];
        m_pos[index+2] = pos[2];

        m_vel[index+0] = vel[0];
        m_vel[index+1] = vel[1];
        m_vel[index+2] = vel[2];
    }
}
Updater.h
/*
 * Copyright 2016 [See AUTHORS file for list of authors]
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef _UPDATER_
#define _UPDATER_

#include "DatapointPartitions/DatapointPartitions.h"
#include "Gradient/Gradient.h"

/*
 * Base class for model updaters.  Each model coordinate c is updated with
 * the rule  x_c <- (1 - Mu(c)) * x_c - Nu(c, j) + H(c, j)  per vector
 * entry j.  Subclasses override H/Nu/Mu (and the Prepare* hooks) to
 * implement a specific optimization algorithm; this base class handles
 * lazy "catch-up" bookkeeping so coordinates untouched for several steps
 * can be brought up to date in closed form.
 */
class Updater {
  protected:
    // Keep a reference of the model and datapoints, and partition ordering.
    Model *model;
    std::vector<Datapoint *> datapoints;
    DatapointPartitions *datapoint_partitions;

    // Gradient object stores extra info for Model processing
    Gradient gradient;

    // bookkeeping[c] = order of the last datapoint that updated coordinate
    // c (0 means "not updated this epoch"); used to compute catch-up gaps.
    std::vector<int> bookkeeping;

    // A reference to all_coordinates, which indexes all the coordinates of the model.
    std::vector<int> all_coordinates;

    // H, Nu and Mu for updates.  Defaults are no-ops (identity update).
    virtual double H(int coordinate, int index_into_coordinate_vector) { return 0; }
    virtual double Nu(int coordinate, int index_into_coordinate_vector) { return 0; }
    virtual double Mu(int coordinate) { return 0; }

    // After calling PrepareNu/Mu/H, for the given coordinates, we expect that
    // calls to Nu/Mu/H are ready.
    virtual void PrepareNu(const std::vector<int> &coordinates) {}
    virtual void PrepareMu(const std::vector<int> &coordinates) {}
    virtual void PrepareH(const Datapoint &datapoint) {}

    // By default need catch up.  Subclasses whose Mu/Nu are identically
    // zero can return false to skip the catch-up machinery entirely.
    virtual bool NeedCatchUp() { return true; }

    // Apply one update step to every coordinate touched by 'datapoint'.
    // NOTE(review): 'cnt' is never read or written after initialization —
    // looks like leftover debug state.
    virtual void ApplyGradient(const Datapoint &datapoint) {
        static int cnt = 0;
        int n_coords = model->NumCoordinates();
        for (const auto &c : datapoint.GetCoordinates()) {
            const auto &mu = Mu(c);
            // tasvir_log presumably records the modified memory range for
            // replication — TODO confirm against the tasvir API.
            tasvir_log(&model->Data(c, 0, false), sizeof(double) * n_coords);
            for (int j = 0; j < n_coords; j++)
                model->Data(c, j, false) = (1 - mu) * model->Data(c, j, false) - Nu(c, j) + H(c, j);
        }
    }

    // Apply 'diff' missed update steps to coordinate 'index' in closed form:
    // x <- (1-mu)^diff * x - Nu * geom_sum, where geom_sum is the partial
    // geometric series sum_{k=1..diff} (1-mu)^k (zero when mu == 0, since
    // then the Nu terms would have canceled into plain repetition anyway).
    virtual void CatchUp(int index, int diff) {
        if (!NeedCatchUp()) return;

        if (diff < 0) diff = 0;
        int n_coords = model->NumCoordinates();
        double geom_sum = 0;
        double mu = Mu(index);
        if (mu != 0) geom_sum = ((1 - pow(1 - mu, diff + 1)) / (1 - (1 - mu))) - 1;
        tasvir_log(&model->Data(index, 0, false), sizeof(double) * n_coords);
        for (int j = 0; j < n_coords; j++)
            model->Data(index, j, false) =
                pow(1 - mu, diff) * model->Data(index, j, false) - Nu(index, j) * geom_sum;
    }

    // Catch up every coordinate of 'datapoint' to just before its own order.
    virtual void CatchUpDatapoint(const Datapoint &datapoint) {
        int n_coords = model->NumCoordinates();
        for (const auto &c : datapoint.GetCoordinates()) {
            int diff = datapoint.GetOrder() - bookkeeping[c] - 1;
            CatchUp(c, diff);
        }
    }

    // At epoch end, catch every parameter up to the full epoch length.
    // NOTE(review): 'n_coords' is unused here; the commented-out pragmas
    // suggest this loop was once (or is meant to be) parallelized.
    virtual void FinalCatchUp() {
        const auto &n_coords = model->NumCoordinates();
        const auto &parameter_size = model->NumParameters();

        // #pragma omp parallel num_threads(FLAGS_n_threads)
        PrepareNu(all_coordinates);
        PrepareMu(all_coordinates);
        // #pragma omp for
        for (int i = 0; i < model->NumParameters(); i++) {
            int diff = parameter_size - bookkeeping[i];
            CatchUp(i, diff);
        }
    }

  public:
    Updater(Model *model, std::vector<Datapoint *> &datapoints) {
        this->model = model;
        this->datapoints = datapoints;
        for (int i = 0; i < model->NumParameters(); i++) {
            // Set up bookkeeping (one slot per parameter, initially 0).
            bookkeeping.push_back(0);
            // Keep an array holding the indices 0..NumParameters()-1.
            all_coordinates.push_back(i);
        }
    }
    Updater() {}
    virtual ~Updater() {}

    // Could be useful to get partitioning info.
    virtual void SetUpWithPartitions(DatapointPartitions &partitions) { datapoint_partitions = &partitions; }

    // Main update method, which is run by multiple threads.
    virtual void Update(const Datapoint &datapoint) {
        gradient.Clear();
        gradient.datapoint = &datapoint;

        // First prepare Nu and Mu for catchup since they are independent of the the model.
        PrepareNu(datapoint.GetCoordinates());
        PrepareMu(datapoint.GetCoordinates());
        CatchUpDatapoint(datapoint);

        // After catching up, prepare H and apply the gradient.
        PrepareH(datapoint);
        ApplyGradient(datapoint);

        // Update bookkeeping.
        for (const auto &coordinate : datapoint.GetCoordinates()) bookkeeping[coordinate] = datapoint.GetOrder();
    }

    // Called before epoch begins.
    virtual void EpochBegin() {}

    // Called when the epoch ends.
    virtual void EpochFinish() {
        FinalCatchUp();
        std::fill(bookkeeping.begin(), bookkeeping.end(), 0);
    }
};

#endif
mxEvaluatePostFunc2d.c
#include "mex.h"
#include "mxSWE2d.h"

#ifdef _OPENMP
#include <omp.h>
#endif

#define NRHS 5
#define NLHS 1
#define NVAR 3

/*
 * Positivity-preserving limiter for a 2-D shallow-water DG solver.
 * Inputs:  hcrit (dry-cell depth threshold), fphys (Np x K x 3 nodal
 * values: depth h, fluxes qx, qy), hc/qxc/qyc (K cell averages).
 * Output:  plhs[0], an Np x K x 3 array of limited nodal values.
 * Each cell's nodal values are scaled toward the cell average by a factor
 * theta chosen so the minimum depth stays non-negative; nodes that remain
 * below hcrit have their fluxes zeroed.
 */
void mexFunction(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[]) {
  /* check input & output */
  /* NOTE(review): these checks only print a diagnostic and fall through;
     consider mexErrMsgIdAndTxt to abort on a bad call. */
  if (nrhs != NRHS) {
    mexPrintf("Matlab:%s:InvalidNumberInput,\n", __FILE__);
    mexPrintf("%d inputs required.\n", NRHS);
  }
  if (nlhs != NLHS) {
    mexPrintf("Matlab:%s:InvalidNumberOutput,\n", __FILE__);
    mexPrintf("%d inputs required.\n", NLHS);
  }
  /* get inputs */
  double hcrit = mxGetScalar(prhs[0]);
  double* fphys = mxGetPr(prhs[1]);
  double* hc = mxGetPr(prhs[2]);
  double* qxc = mxGetPr(prhs[3]);
  double* qyc = mxGetPr(prhs[4]);

  /* get dimensions: Np nodes per cell, K cells */
  const mwSize* dims = mxGetDimensions(prhs[1]);
  const size_t Np = dims[0];
  const size_t K = dims[1];

  const size_t ndimOut = 3;
  const mwSize dimOut[3] = {Np, K, NVAR};
  /* Output is zero-initialized by mxCreateNumericArray, so cells skipped
     below (dry cells) emit zeros. */
  plhs[0] = mxCreateNumericArray(ndimOut, dimOut, mxDOUBLE_CLASS, mxREAL);

  /* Field layout: three contiguous Np*K planes (h, qx, qy). */
  double* h = fphys;
  double* qx = fphys + K * Np;
  double* qy = fphys + 2 * K * Np;

  double* h_pos = mxGetPr(plhs[0]);
  double* qx_pos = h_pos + K * Np;
  double* qy_pos = h_pos + 2 * K * Np;

  const double ksi = 0.0;  /* mean-depth threshold for a fully dry cell */

  /* cell area and scalar averages */
  /* DG_THREADS is presumably defined in mxSWE2d.h — verify. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(DG_THREADS)
#endif
  for (int k = 0; k < K; k++) {
    double hmean = hc[k];
    double qxmean = qxc[k];
    double qymean = qyc[k];
    if (hmean <= ksi) {
      /* Fully dry cell: zero the nodal values IN PLACE in the input
         buffer fphys (intentional in-place semantics? — confirm with
         callers); the corresponding output stays at its zero init. */
      for (int n = 0; n < Np; n++) {
        size_t sk = k * Np + n;
        h[sk] = 0;
        qx[sk] = 0;
        qy[sk] = 0;
      }
      continue;
    }
    /* Minimum nodal depth within the cell.  'min' comes from a project
       header (not standard C) — presumably a macro in mxSWE2d.h. */
    double hmin = h[k * Np];
    for (int n = 0; n < Np; n++) {
      hmin = min(hmin, h[k * Np + n]);
    }
    /* Scaling factor theta in [0,1]: 1 keeps nodal values, 0 collapses
       the cell to its average. */
    double theta;
    if (hmin < hmean) {
      theta = min((hmean - ksi) / (hmean - hmin), 1.0);
    } else {
      theta = 0.0;
    }
    /* Limit each node toward the cell average by theta. */
    for (int n = 0; n < Np; n++) {
      size_t sk = k * Np + n;
      h_pos[sk] = theta * (h[sk] - hmean) + hmean;
      qx_pos[sk] = theta * (qx[sk] - qxmean) + qxmean;
      qy_pos[sk] = theta * (qy[sk] - qymean) + qymean;

      if (h_pos[sk] < hcrit) {  // dry nodes
        qx_pos[sk] = 0.0;
        qy_pos[sk] = 0.0;
      }
    }
  }
  return;
}
aarch64_vfabi_WidestDataSize.c
// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +sve -fopenmp -x c -emit-llvm %s -o - -femit-all-decls | FileCheck %s // RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +sve -fopenmp-simd -x c -emit-llvm %s -o - -femit-all-decls | FileCheck %s // REQUIRES: aarch64-registered-target // Note: -fopemp and -fopenmp-simd behavior are expected to be the same. // This test checks the values of Widest Data Size (WDS), as defined // in https://github.com/ARM-software/abi-aa/tree/main/vfabia64 // // WDS is used to check the accepted values <N> of `simdlen(<N>)` when // targeting fixed-length SVE vector function names. The values of // `<N>` that are accepted are such that for X = WDS * <N> * 8, // 128-bit <= X <= 2048-bit and X is a multiple of 128-bit. #pragma omp declare simd simdlen(8) #pragma omp declare simd simdlen(16) #pragma omp declare simd simdlen(256) #pragma omp declare simd simdlen(272) char WDS_is_sizeof_char(char in); // WDS = 1, simdlen(8) and simdlen(272) are not generated. // CHECK-DAG: _ZGVsM16v_WDS_is_sizeof_char // CHECK-DAG: _ZGVsM256v_WDS_is_sizeof_char // CHECK-NOT: _ZGV{{.*}}_WDS_is_sizeof_char #pragma omp declare simd simdlen(4) #pragma omp declare simd simdlen(8) #pragma omp declare simd simdlen(128) #pragma omp declare simd simdlen(136) char WDS_is_sizeof_short(short in); // WDS = 2, simdlen(4) and simdlen(136) are not generated. // CHECK-DAG: _ZGVsM8v_WDS_is_sizeof_short // CHECK-DAG: _ZGVsM128v_WDS_is_sizeof_short // CHECK-NOT: _ZGV{{.*}}_WDS_is_sizeof_short #pragma omp declare simd linear(sin) notinbranch simdlen(2) #pragma omp declare simd linear(sin) notinbranch simdlen(4) #pragma omp declare simd linear(sin) notinbranch simdlen(64) #pragma omp declare simd linear(sin) notinbranch simdlen(68) void WDS_is_sizeof_float_pointee(float in, float *sin); // WDS = 4, simdlen(2) and simdlen(68) are not generated. 
// CHECK-DAG: _ZGVsM4vl4_WDS_is_sizeof_float_pointee // CHECK-DAG: _ZGVsM64vl4_WDS_is_sizeof_float_pointee // CHECK-NOT: _ZGV{{.*}}_WDS_is_sizeof_float_pointee #pragma omp declare simd linear(sin) notinbranch simdlen(2) #pragma omp declare simd linear(sin) notinbranch simdlen(4) #pragma omp declare simd linear(sin) notinbranch simdlen(32) #pragma omp declare simd linear(sin) notinbranch simdlen(34) void WDS_is_sizeof_double_pointee(float in, double *sin); // WDS = 8 because of the linear clause, simdlen(34) is not generated. // CHECK-DAG: _ZGVsM2vl8_WDS_is_sizeof_double_pointee // CHECK-DAG: _ZGVsM4vl8_WDS_is_sizeof_double_pointee // CHECK-DAG: _ZGVsM32vl8_WDS_is_sizeof_double_pointee // CHECK-NOT: _ZGV{{.*}}_WDS_is_sizeof_double_pointee #pragma omp declare simd simdlen(2) #pragma omp declare simd simdlen(4) #pragma omp declare simd simdlen(32) #pragma omp declare simd simdlen(34) double WDS_is_sizeof_double(double in); // WDS = 8, simdlen(34) is not generated. // CHECK-DAG: _ZGVsM2v_WDS_is_sizeof_double // CHECK-DAG: _ZGVsM4v_WDS_is_sizeof_double // CHECK-DAG: _ZGVsM32v_WDS_is_sizeof_double // CHECK-NOT: _ZGV{{.*}}_WDS_is_sizeof_double static char C; static short S; static float F; static double D; void do_something() { C = WDS_is_sizeof_char(C); C = WDS_is_sizeof_short(S); WDS_is_sizeof_float_pointee(F, &F); WDS_is_sizeof_double_pointee(F, &D); D = WDS_is_sizeof_double(D); }
single.c
#include <stdio.h>
#include <omp.h>

/*
 * OpenMP "single" demo: one thread reads a value 'a' inside a single
 * construct (with an implicit barrier), then the team fills b[] with 'a'
 * via a worksharing for loop, recording which thread wrote each slot.
 */
int main()
{
  int n = 9, i, a = 0;  /* initialize 'a' so it is never read indeterminate */
  int b[n];
  int t[n];

  for (i = 0; i < n; i++)
    b[i] = -3;

  #pragma omp parallel
  {
    #pragma omp single
    {
      printf("Introduce valor de inicialización a: ");
      /* Check scanf's result: on bad input fall back to 0 instead of
         using an unassigned value (previously undefined behavior). */
      if (scanf("%d", &a) != 1)
        a = 0;
      printf("Single ejecutada por el thread %d\n", omp_get_thread_num());
    }
    /* The implicit barrier at the end of 'single' guarantees every
       thread sees the final value of 'a' before the loop below. */
    #pragma omp for
    for (i = 0; i < n; i++) {
      b[i] = a;
      t[i] = omp_get_thread_num();
    }
  }

  /* Fixed user-facing typo: "Depués" -> "Después". */
  printf("Después de la región parallel:\n");
  for (i = 0; i < n; i++) {
    printf("b[%d] = %d, thread no: %d\t", i, b[i], t[i]);
    printf("\n");
  }
  return 0;
}
convolution_3x3_pack4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_pack4_sse(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * 
ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = pb-pa-inch/pa-64-outch/pb kernel_tm_pack4.create(inch / 4, 64, outch / 4, (size_t)4u * 4 * 4, 4 * 4); for (int q = 0; q + (4 - 1) < outch; q += 4) { Mat g0 = kernel_tm_pack4.channel(q / 4); for (int k = 0; k < 64; k++) { float* g00 = g0.row<float>(k); for (int p = 0; p + (4 - 1) < inch; p += 4) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { const float* k00 = kernel_tm.channel(q + j).row(p + i); g00[0] = (float)k00[k]; g00++; } } } } } } static void conv3x3s1_winograd64_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tiles = outw / 6; int h_tiles = outh / 6; int tiles = w_tiles * h_tiles; bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); conv3x3s1_winograd64_transform_input_pack4_sse(bottom_blob_bordered, bottom_blob_tm, opt); } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) 
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { float* tmpptr = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x12 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); __m128 _r4 = _mm_load_ps(r0 + 4 * 4); __m128 _r5 = _mm_load_ps(r0 + 4 * 5); __m128 _r6 = _mm_load_ps(r0 + 4 * 6); __m128 _r7 = _mm_load_ps(r0 + 4 * 7); __m128 _r8 = _mm_load_ps(r0 + 4 * 8); __m128 _r9 = _mm_load_ps(r0 + 4 * 9); __m128 _ra = _mm_load_ps(r0 + 4 * 10); __m128 _rb = _mm_load_ps(r0 + 4 * 11); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7); _MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r4); _mm_store_ps(tmpptr + 4 * 2, _r8); _mm_store_ps(tmpptr + 4 * 3, _r1); _mm_store_ps(tmpptr + 4 * 4, _r5); _mm_store_ps(tmpptr + 4 * 5, _r9); _mm_store_ps(tmpptr + 4 * 6, _r2); _mm_store_ps(tmpptr + 4 * 7, _r6); _mm_store_ps(tmpptr + 4 * 8, _ra); _mm_store_ps(tmpptr + 4 * 9, _r3); _mm_store_ps(tmpptr + 4 * 10, _r7); _mm_store_ps(tmpptr + 4 * 11, _rb); r0 += bottom_blob_tm.cstep * 4; tmpptr += 48; } } for (; i + 7 < tiles; i += 8) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8); const float* r0 = 
bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x8 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); __m128 _r4 = _mm_load_ps(r0 + 4 * 4); __m128 _r5 = _mm_load_ps(r0 + 4 * 5); __m128 _r6 = _mm_load_ps(r0 + 4 * 6); __m128 _r7 = _mm_load_ps(r0 + 4 * 7); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r4); _mm_store_ps(tmpptr + 4 * 2, _r1); _mm_store_ps(tmpptr + 4 * 3, _r5); _mm_store_ps(tmpptr + 4 * 4, _r2); _mm_store_ps(tmpptr + 4 * 5, _r6); _mm_store_ps(tmpptr + 4 * 6, _r3); _mm_store_ps(tmpptr + 4 * 7, _r7); r0 += bottom_blob_tm.cstep * 4; tmpptr += 32; } } for (; i + 3 < tiles; i += 4) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x4 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r2 = _mm_load_ps(r0 + 4 * 2); __m128 _r3 = _mm_load_ps(r0 + 4 * 3); _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r1); _mm_store_ps(tmpptr + 4 * 2, _r2); _mm_store_ps(tmpptr + 4 * 3, _r3); r0 += bottom_blob_tm.cstep * 4; tmpptr += 16; } } for (; i + 1 < tiles; i += 2) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x2 __m128 _r0 = _mm_load_ps(r0); __m128 _r1 = _mm_load_ps(r0 + 4); __m128 _r01_0 = _mm_unpacklo_ps(_r0, _r1); __m128 _r01_1 = _mm_unpackhi_ps(_r0, _r1); _mm_store_ps(tmpptr, _r01_0); _mm_store_ps(tmpptr + 4, _r01_1); r0 += bottom_blob_tm.cstep * 4; tmpptr += 8; } } for (; i < tiles; i++) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* r0 = bottom_blob_tm; 
r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { __m128 _val = _mm_load_ps(r0); _mm_store_ps(tmpptr, _val); r0 += bottom_blob_tm.cstep * 4; tmpptr += 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); __m128 _sum4 = _mm_setzero_ps(); __m128 _sum5 = _mm_setzero_ps(); __m128 _sum6 = _mm_setzero_ps(); __m128 _sum7 = _mm_setzero_ps(); __m128 _sum8 = _mm_setzero_ps(); __m128 _sum9 = _mm_setzero_ps(); __m128 _suma = _mm_setzero_ps(); __m128 _sumb = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); __m128 _val4 = _mm_load1_ps(r0 + 4); __m128 _val5 = _mm_load1_ps(r0 + 5); __m128 _val6 = _mm_load1_ps(r0 + 6); __m128 _val7 = _mm_load1_ps(r0 + 7); __m128 _val8 = _mm_load1_ps(r0 + 8); __m128 _val9 = _mm_load1_ps(r0 + 9); __m128 _vala = _mm_load1_ps(r0 + 10); __m128 _valb = _mm_load1_ps(r0 + 11); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5); _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7); _sum8 = 
_mm_comp_fmadd_ps(_val8, _w0, _sum8); _sum9 = _mm_comp_fmadd_ps(_val9, _w0, _sum9); _suma = _mm_comp_fmadd_ps(_vala, _w0, _suma); _sumb = _mm_comp_fmadd_ps(_valb, _w0, _sumb); r0 += 12; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm + 4 * 3, _sum3); _mm_store_ps(output0_tm + 4 * 4, _sum4); _mm_store_ps(output0_tm + 4 * 5, _sum5); _mm_store_ps(output0_tm + 4 * 6, _sum6); _mm_store_ps(output0_tm + 4 * 7, _sum7); _mm_store_ps(output0_tm + 4 * 8, _sum8); _mm_store_ps(output0_tm + 4 * 9, _sum9); _mm_store_ps(output0_tm + 4 * 10, _suma); _mm_store_ps(output0_tm + 4 * 11, _sumb); output0_tm += 4 * 12; } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 __m128 _sum0 = _mm_setzero_ps(); __m128 _sum1 = _mm_setzero_ps(); __m128 _sum2 = _mm_setzero_ps(); __m128 _sum3 = _mm_setzero_ps(); __m128 _sum4 = _mm_setzero_ps(); __m128 _sum5 = _mm_setzero_ps(); __m128 _sum6 = _mm_setzero_ps(); __m128 _sum7 = _mm_setzero_ps(); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(k0); __m128 _val0 = _mm_load1_ps(r0); __m128 _val1 = _mm_load1_ps(r0 + 1); __m128 _val2 = _mm_load1_ps(r0 + 2); __m128 _val3 = _mm_load1_ps(r0 + 3); __m128 _val4 = _mm_load1_ps(r0 + 4); __m128 _val5 = _mm_load1_ps(r0 + 5); __m128 _val6 = _mm_load1_ps(r0 + 6); __m128 _val7 = _mm_load1_ps(r0 + 7); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5); _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7); r0 += 8; k0 += 4; } _mm_store_ps(output0_tm, _sum0); _mm_store_ps(output0_tm + 4, _sum1); _mm_store_ps(output0_tm + 4 * 2, _sum2); _mm_store_ps(output0_tm 
+ 4 * 3, _sum3);
_mm_store_ps(output0_tm + 4 * 4, _sum4);
_mm_store_ps(output0_tm + 4 * 5, _sum5);
_mm_store_ps(output0_tm + 4 * 6, _sum6);
_mm_store_ps(output0_tm + 4 * 7, _sum7);
output0_tm += 4 * 8;
}
// remaining tiles, batch of 4: 4 dot-product accumulators per weight vector
for (; i + 3 < tiles; i += 4)
{
    // row index decomposes the 12/8/4-batched layout produced by the permute step
    const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
    const float* k0 = kernel0_tm.row(r);
    int nn = inch * 4; // inch always > 0
    __m128 _sum0 = _mm_setzero_ps();
    __m128 _sum1 = _mm_setzero_ps();
    __m128 _sum2 = _mm_setzero_ps();
    __m128 _sum3 = _mm_setzero_ps();
    for (int j = 0; j < nn; j++)
    {
        __m128 _w0 = _mm_load_ps(k0);
        __m128 _val0 = _mm_load1_ps(r0);
        __m128 _val1 = _mm_load1_ps(r0 + 1);
        __m128 _val2 = _mm_load1_ps(r0 + 2);
        __m128 _val3 = _mm_load1_ps(r0 + 3);
        // _mm_comp_fmadd_ps: project-provided fused multiply-add wrapper
        // (falls back to mul+add without FMA support) -- TODO confirm against ncnn x86 headers
        _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
        _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
        _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2);
        _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3);
        r0 += 4;
        k0 += 4;
    }
    _mm_store_ps(output0_tm, _sum0);
    _mm_store_ps(output0_tm + 4, _sum1);
    _mm_store_ps(output0_tm + 4 * 2, _sum2);
    _mm_store_ps(output0_tm + 4 * 3, _sum3);
    output0_tm += 4 * 4;
}
// remaining tiles, batch of 2
for (; i + 1 < tiles; i += 2)
{
    const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
    const float* k0 = kernel0_tm.row(r);
    int nn = inch * 4; // inch always > 0
    __m128 _sum0 = _mm_setzero_ps();
    __m128 _sum1 = _mm_setzero_ps();
    for (int j = 0; j < nn; j++)
    {
        __m128 _w0 = _mm_load_ps(k0);
        __m128 _val0 = _mm_load1_ps(r0);
        __m128 _val1 = _mm_load1_ps(r0 + 1);
        _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
        _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
        r0 += 2;
        k0 += 4;
    }
    _mm_store_ps(output0_tm, _sum0);
    _mm_store_ps(output0_tm + 4, _sum1);
    output0_tm += 4 * 2;
}
// remaining tiles, one at a time
for (; i < tiles; i++)
{
    const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
    const float* k0 = kernel0_tm.row(r);
    int nn = inch * 4; // inch always > 0
    __m128 _sum = _mm_setzero_ps();
    for (int j = 0; j < nn; j++)
    {
        __m128 _w0 = _mm_load_ps(k0);
        __m128 _val0 = _mm_load1_ps(r0);
        _sum = _mm_comp_fmadd_ps(_val0, _w0, _sum);
        r0 += 1;
        k0 += 4;
    }
    _mm_store_ps(output0_tm, _sum);
    output0_tm += 4;
}
}
}
}
bottom_blob_tm = Mat();
// END dot

// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
    top_blob_bordered = top_blob;
}
else
{
    // tile-aligned output is larger than the requested output; crop below
    top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
    conv3x3s1_winograd64_transform_output_pack4_sse(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output

// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// Transform the 3x3 kernel into the 6x6 Winograd F(4x4,3x3) domain and
// interleave it into the pack-4 layout consumed by conv3x3s1_winograd42_pack4_sse.
// kernel:          inch*outch raw 3x3 filters
// kernel_tm_pack4: output, layout pb-pa-inch/pa-36-outch/pb (see comment below)
// Requires inch and outch to be multiples of 4 (pack4).
static void conv3x3s1_winograd42_transform_kernel_pack4_sse(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
    // winograd42 transform kernel
    Mat kernel_tm(6 * 6, inch, outch);

    // G matrix of F(4x4,3x3): maps a 3-tap filter row/column to 6 transform coefficients
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h = G * g  (apply transform along rows)
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G^T  (apply transform along columns)
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];
                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = pb-pa-inch/pa-36-outch/pb
    kernel_tm_pack4.create(inch / 4, 36, outch / 4, (size_t)4u * 4 * 4, 4 * 4);

    for (int q = 0; q + (4 - 1) < outch; q += 4)
    {
        Mat g0 = kernel_tm_pack4.channel(q / 4);
        for (int k = 0; k < 36; k++)
        {
            float* g00 = g0.row<float>(k);
            for (int p = 0; p + (4 - 1) < inch; p += 4)
            {
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);
                        g00[0] = (float)k00[k];
                        g00++;
                    }
                }
            }
        }
    }
}

// 3x3 stride-1 convolution via Winograd F(4x4,3x3) for pack-4 SSE layout:
// pad input -> transform input tiles to the 36-coefficient domain -> per-coefficient
// batched dot products against the transformed kernel -> inverse transform -> crop.
// kernel_tm must come from conv3x3s1_winograd42_transform_kernel_pack4_sse.
static void conv3x3s1_winograd42_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 4;
        int h_tiles = outh / 4;
        int tiles = w_tiles * h_tiles;

        // 36 = 6x6 transform coefficients per tile
        bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        conv3x3s1_winograd42_transform_input_pack4_sse(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = h_tm / 6 * w_tm / 6;

        // permute
        // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        // Repack tiles into batches of 12/8/4/2/1 so the GEMM loops below read contiguously.
        Mat bottom_blob_tm2;
        if (tiles >= 12)
            bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 36; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
            for (; i + 11 < tiles; i += 12)
            {
                float* tmpptr = tm2.row(i / 12);

                const float* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 4;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 4x12
                    __m128 _r0 = _mm_load_ps(r0);
                    __m128 _r1 = _mm_load_ps(r0 + 4);
                    __m128 _r2 = _mm_load_ps(r0 + 4 * 2);
                    __m128 _r3 = _mm_load_ps(r0 + 4 * 3);
                    __m128 _r4 = _mm_load_ps(r0 + 4 * 4);
                    __m128 _r5 = _mm_load_ps(r0 + 4 * 5);
                    __m128 _r6 = _mm_load_ps(r0 + 4 * 6);
                    __m128 _r7 = _mm_load_ps(r0 + 4 * 7);
                    __m128 _r8 = _mm_load_ps(r0 + 4 * 8);
                    __m128 _r9 = _mm_load_ps(r0 + 4 * 9);
                    __m128 _ra = _mm_load_ps(r0 + 4 * 10);
                    __m128 _rb = _mm_load_ps(r0 + 4 * 11);
                    _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
                    _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
                    _MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb);
                    // interleave the three transposed 4x4 groups lane-major
                    _mm_store_ps(tmpptr, _r0);
                    _mm_store_ps(tmpptr + 4, _r4);
                    _mm_store_ps(tmpptr + 4 * 2, _r8);
                    _mm_store_ps(tmpptr + 4 * 3, _r1);
                    _mm_store_ps(tmpptr + 4 * 4, _r5);
                    _mm_store_ps(tmpptr + 4 * 5, _r9);
                    _mm_store_ps(tmpptr + 4 * 6, _r2);
                    _mm_store_ps(tmpptr + 4 * 7, _r6);
                    _mm_store_ps(tmpptr + 4 * 8, _ra);
                    _mm_store_ps(tmpptr + 4 * 9, _r3);
                    _mm_store_ps(tmpptr + 4 * 10, _r7);
                    _mm_store_ps(tmpptr + 4 * 11, _rb);
                    r0 += bottom_blob_tm.cstep * 4;
                    tmpptr += 48;
                }
            }
            for (; i + 7 < tiles; i += 8)
            {
                float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);

                const float* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 4;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 4x8
                    __m128 _r0 = _mm_load_ps(r0);
                    __m128 _r1 = _mm_load_ps(r0 + 4);
                    __m128 _r2 = _mm_load_ps(r0 + 4 * 2);
                    __m128 _r3 = _mm_load_ps(r0 + 4 * 3);
                    __m128 _r4 = _mm_load_ps(r0 + 4 * 4);
                    __m128 _r5 = _mm_load_ps(r0 + 4 * 5);
                    __m128 _r6 = _mm_load_ps(r0 + 4 * 6);
                    __m128 _r7 = _mm_load_ps(r0 + 4 * 7);
                    _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
                    _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7);
                    _mm_store_ps(tmpptr, _r0);
                    _mm_store_ps(tmpptr + 4, _r4);
                    _mm_store_ps(tmpptr + 4 * 2, _r1);
                    _mm_store_ps(tmpptr + 4 * 3, _r5);
                    _mm_store_ps(tmpptr + 4 * 4, _r2);
                    _mm_store_ps(tmpptr + 4 * 5, _r6);
                    _mm_store_ps(tmpptr + 4 * 6, _r3);
                    _mm_store_ps(tmpptr + 4 * 7, _r7);
                    r0 += bottom_blob_tm.cstep * 4;
                    tmpptr += 32;
                }
            }
            for (; i + 3 < tiles; i += 4)
            {
                float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);

                const float* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 4;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 4x4
                    __m128 _r0 = _mm_load_ps(r0);
                    __m128 _r1 = _mm_load_ps(r0 + 4);
                    __m128 _r2 = _mm_load_ps(r0 + 4 * 2);
                    __m128 _r3 = _mm_load_ps(r0 + 4 * 3);
                    _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3);
                    _mm_store_ps(tmpptr, _r0);
                    _mm_store_ps(tmpptr + 4, _r1);
                    _mm_store_ps(tmpptr + 4 * 2, _r2);
                    _mm_store_ps(tmpptr + 4 * 3, _r3);
                    r0 += bottom_blob_tm.cstep * 4;
                    tmpptr += 16;
                }
            }
            for (; i + 1 < tiles; i += 2)
            {
                float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);

                const float* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 4;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 4x2
                    __m128 _r0 = _mm_load_ps(r0);
                    __m128 _r1 = _mm_load_ps(r0 + 4);
                    __m128 _r01_0 = _mm_unpacklo_ps(_r0, _r1);
                    __m128 _r01_1 = _mm_unpackhi_ps(_r0, _r1);
                    _mm_store_ps(tmpptr, _r01_0);
                    _mm_store_ps(tmpptr + 4, _r01_1);
                    r0 += bottom_blob_tm.cstep * 4;
                    tmpptr += 8;
                }
            }
            for (; i < tiles; i++)
            {
                float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);

                const float* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 4;

                for (int q = 0; q < inch; q++)
                {
                    __m128 _val = _mm_load_ps(r0);
                    _mm_store_ps(tmpptr, _val);
                    r0 += bottom_blob_tm.cstep * 4;
                    tmpptr += 4;
                }
            }
        }
        bottom_blob_tm = Mat();
        // permute end

        top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            float* output0_tm = top_blob_tm.channel(p);
            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int r = 0; r < 36; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                // GEMM over tile batches: 12, then 8/4/2/1 for the remainder
                int i = 0;
                for (; i + 11 < tiles; i += 12)
                {
                    const float* r0 = bb2.row(i / 12);
                    const float* k0 = kernel0_tm.row(r);
                    int nn = inch * 4; // inch always > 0
                    __m128 _sum0 = _mm_setzero_ps();
                    __m128 _sum1 = _mm_setzero_ps();
                    __m128 _sum2 = _mm_setzero_ps();
                    __m128 _sum3 = _mm_setzero_ps();
                    __m128 _sum4 = _mm_setzero_ps();
                    __m128 _sum5 = _mm_setzero_ps();
                    __m128 _sum6 = _mm_setzero_ps();
                    __m128 _sum7 = _mm_setzero_ps();
                    __m128 _sum8 = _mm_setzero_ps();
                    __m128 _sum9 = _mm_setzero_ps();
                    __m128 _suma = _mm_setzero_ps();
                    __m128 _sumb = _mm_setzero_ps();
                    for (int j = 0; j < nn; j++)
                    {
                        __m128 _w0 = _mm_load_ps(k0);
                        __m128 _val0 = _mm_load1_ps(r0);
                        __m128 _val1 = _mm_load1_ps(r0 + 1);
                        __m128 _val2 = _mm_load1_ps(r0 + 2);
                        __m128 _val3 = _mm_load1_ps(r0 + 3);
                        __m128 _val4 = _mm_load1_ps(r0 + 4);
                        __m128 _val5 = _mm_load1_ps(r0 + 5);
                        __m128 _val6 = _mm_load1_ps(r0 + 6);
                        __m128 _val7 = _mm_load1_ps(r0 + 7);
                        __m128 _val8 = _mm_load1_ps(r0 + 8);
                        __m128 _val9 = _mm_load1_ps(r0 + 9);
                        __m128 _vala = _mm_load1_ps(r0 + 10);
                        __m128 _valb = _mm_load1_ps(r0 + 11);
                        _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
                        _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
                        _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2);
                        _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3);
                        _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4);
                        _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5);
                        _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6);
                        _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7);
                        _sum8 = _mm_comp_fmadd_ps(_val8, _w0, _sum8);
                        _sum9 = _mm_comp_fmadd_ps(_val9, _w0, _sum9);
                        _suma = _mm_comp_fmadd_ps(_vala, _w0, _suma);
                        _sumb = _mm_comp_fmadd_ps(_valb, _w0, _sumb);
                        r0 += 12;
                        k0 += 4;
                    }
                    _mm_store_ps(output0_tm, _sum0);
                    _mm_store_ps(output0_tm + 4, _sum1);
                    _mm_store_ps(output0_tm + 4 * 2, _sum2);
                    _mm_store_ps(output0_tm + 4 * 3, _sum3);
                    _mm_store_ps(output0_tm + 4 * 4, _sum4);
                    _mm_store_ps(output0_tm + 4 * 5, _sum5);
                    _mm_store_ps(output0_tm + 4 * 6, _sum6);
                    _mm_store_ps(output0_tm + 4 * 7, _sum7);
                    _mm_store_ps(output0_tm + 4 * 8, _sum8);
                    _mm_store_ps(output0_tm + 4 * 9, _sum9);
                    _mm_store_ps(output0_tm + 4 * 10, _suma);
                    _mm_store_ps(output0_tm + 4 * 11, _sumb);
                    output0_tm += 4 * 12;
                }
                for (; i + 7 < tiles; i += 8)
                {
                    const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
                    const float* k0 = kernel0_tm.row(r);
                    int nn = inch * 4; // inch always > 0
                    __m128 _sum0 = _mm_setzero_ps();
                    __m128 _sum1 = _mm_setzero_ps();
                    __m128 _sum2 = _mm_setzero_ps();
                    __m128 _sum3 = _mm_setzero_ps();
                    __m128 _sum4 = _mm_setzero_ps();
                    __m128 _sum5 = _mm_setzero_ps();
                    __m128 _sum6 = _mm_setzero_ps();
                    __m128 _sum7 = _mm_setzero_ps();
                    for (int j = 0; j < nn; j++)
                    {
                        __m128 _w0 = _mm_load_ps(k0);
                        __m128 _val0 = _mm_load1_ps(r0);
                        __m128 _val1 = _mm_load1_ps(r0 + 1);
                        __m128 _val2 = _mm_load1_ps(r0 + 2);
                        __m128 _val3 = _mm_load1_ps(r0 + 3);
                        __m128 _val4 = _mm_load1_ps(r0 + 4);
                        __m128 _val5 = _mm_load1_ps(r0 + 5);
                        __m128 _val6 = _mm_load1_ps(r0 + 6);
                        __m128 _val7 = _mm_load1_ps(r0 + 7);
                        _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
                        _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
                        _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2);
                        _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3);
                        _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4);
                        _sum5 = _mm_comp_fmadd_ps(_val5, _w0, _sum5);
                        _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6);
                        _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7);
                        r0 += 8;
                        k0 += 4;
                    }
                    _mm_store_ps(output0_tm, _sum0);
                    _mm_store_ps(output0_tm + 4, _sum1);
                    _mm_store_ps(output0_tm + 4 * 2, _sum2);
                    _mm_store_ps(output0_tm + 4 * 3, _sum3);
                    _mm_store_ps(output0_tm + 4 * 4, _sum4);
                    _mm_store_ps(output0_tm + 4 * 5, _sum5);
                    _mm_store_ps(output0_tm + 4 * 6, _sum6);
                    _mm_store_ps(output0_tm + 4 * 7, _sum7);
                    output0_tm += 4 * 8;
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
                    const float* k0 = kernel0_tm.row(r);
                    int nn = inch * 4; // inch always > 0
                    __m128 _sum0 = _mm_setzero_ps();
                    __m128 _sum1 = _mm_setzero_ps();
                    __m128 _sum2 = _mm_setzero_ps();
                    __m128 _sum3 = _mm_setzero_ps();
                    for (int j = 0; j < nn; j++)
                    {
                        __m128 _w0 = _mm_load_ps(k0);
                        __m128 _val0 = _mm_load1_ps(r0);
                        __m128 _val1 = _mm_load1_ps(r0 + 1);
                        __m128 _val2 = _mm_load1_ps(r0 + 2);
                        __m128 _val3 = _mm_load1_ps(r0 + 3);
                        _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
                        _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
                        _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2);
                        _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3);
                        r0 += 4;
                        k0 += 4;
                    }
                    _mm_store_ps(output0_tm, _sum0);
                    _mm_store_ps(output0_tm + 4, _sum1);
                    _mm_store_ps(output0_tm + 4 * 2, _sum2);
                    _mm_store_ps(output0_tm + 4 * 3, _sum3);
                    output0_tm += 4 * 4;
                }
                for (; i + 1 < tiles; i += 2)
                {
                    const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
                    const float* k0 = kernel0_tm.row(r);
                    int nn = inch * 4; // inch always > 0
                    __m128 _sum0 = _mm_setzero_ps();
                    __m128 _sum1 = _mm_setzero_ps();
                    for (int j = 0; j < nn; j++)
                    {
                        __m128 _w0 = _mm_load_ps(k0);
                        __m128 _val0 = _mm_load1_ps(r0);
                        __m128 _val1 = _mm_load1_ps(r0 + 1);
                        _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
                        _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1);
                        r0 += 2;
                        k0 += 4;
                    }
                    _mm_store_ps(output0_tm, _sum0);
                    _mm_store_ps(output0_tm + 4, _sum1);
                    output0_tm += 4 * 2;
                }
                for (; i < tiles; i++)
                {
                    const float* r0 = bb2.row<const float>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
                    const float* k0 = kernel0_tm.row<const float>(r);
                    int nn = inch * 4; // inch always > 0
                    __m128 _sum = _mm_setzero_ps();
                    for (int j = 0; j < nn; j++)
                    {
                        __m128 _w0 = _mm_load_ps(k0);
                        __m128 _val0 = _mm_load1_ps(r0);
                        _sum = _mm_comp_fmadd_ps(_val0, _w0, _sum);
                        r0 += 1;
                        k0 += 4;
                    }
                    _mm_store_ps(output0_tm, _sum);
                    output0_tm += 4;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        // tile-aligned output is larger than requested; cropped below
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        conv3x3s1_winograd42_transform_output_pack4_sse(top_blob_tm, top_blob_bordered, bias, opt);
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
GB_binop__bxnor_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bxnor_int8 // A.*B function (eWiseMult): GB_AemultB__bxnor_int8 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bxnor_int8 // C+=b function (dense accum): GB_Cdense_accumb__bxnor_int8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bxnor_int8 // C=scalar+B GB_bind1st__bxnor_int8 // C=scalar+B' GB_bind1st_tran__bxnor_int8 // C=A+scalar GB_bind2nd__bxnor_int8 // C=A'+scalar GB_bind2nd_tran__bxnor_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = ~((aij) ^ (bij)) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax 
[pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = ~((x) ^ (y)) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXNOR || GxB_NO_INT8 || GxB_NO_BXNOR_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bxnor_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bxnor_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bxnor_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__bxnor_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bxnor_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT 
TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bxnor_int8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t bij = Bx [p] ; Cx [p] = ~((x) ^ (bij)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bxnor_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; Cx [p] = ~((aij) ^ (y)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx 
[pC] = ~((x) ^ (aij)) ; \ } GrB_Info GB_bind1st_tran__bxnor_int8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = ~((aij) ^ (y)) ; \ } GrB_Info GB_bind2nd_tran__bxnor_int8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
Loop.h
// This file is part of the Peano project. For conditions of distribution and
// use, please see the copyright notice at www.peano-framework.org
/**
 * This file defines some macros for d-dimensional loops.
 *
 * @version $Revision: 1.10 $
 * @author Tobias Weinzierl
 */
#ifndef _PEANO_UTILS_LOOP_H_
#define _PEANO_UTILS_LOOP_H_

#include "peano/utils/Globals.h"
#include "tarch/la/Vector.h"
#include "tarch/multicore/Loop.h"

#include <bitset>

namespace peano {
  namespace utils {
    /**
     * Is used by the z-loop. See macro dforz.
     */
    typedef std::bitset<DIMENSIONS> LoopDirection;

    /**
     * This operation performs a d-dimensional increment on a given integer vector:
     * The first component of the vector is incremented. If the first component is
     * greater than max-1, the component is set zero and the next component is
     * incremented by one. This operation is used often by d-dimensional for-loops.
     */
    void dInc(tarch::la::Vector<DIMENSIONS,int>& counter, int max);

    /**
     * This operation performs a d-dimensional decrement on a given integer vector:
     * The first component of the vector is decremented. If the first component is
     * smaller than 0, the component is set to max and the next component is
     * decremented by one.
     */
    void dDec(tarch::la::Vector<DIMENSIONS,int>& counter, int max);

    /**
     * This operation performs a d-dimensional increment on a given integer vector:
     * The first component of the vector is incremented. If the first component is
     * greater than max(0)-1, the component is set zero and the next component is
     * incremented by one. This operation is used often by d-dimensional for-loops.
     */
    void dInc(tarch::la::Vector<DIMENSIONS,int>& counter, const tarch::la::Vector<DIMENSIONS,int>& max);

    /**
     * Perform a d-dimensional increment by value increment: The first component
     * of the counter is incremented by increment. Afterwards, the operation
     * checks the first entry: If it exceeds max, its module value is set, the
     * next component is incremented by increment, and the check continues.
     */
    void dIncByVector(tarch::la::Vector<DIMENSIONS,int>& counter, int max, int increment);

    /**
     * Perform a scalar increment of a vector: The operation equals a sequence of
     * increment calls to dInc().
     */
    void dIncByScalar(tarch::la::Vector<DIMENSIONS,int>& counter, int max, int increment);

    /**
     * Same operation as dInc(tarch::la::Vector<DIMENSIONS,int>,int), but now one dimension is not taken
     * into consideration.
     */
    void dInc(tarch::la::Vector<DIMENSIONS,int>& counter, int max, int doNotExamine);

    /**
     * Operation similar to dInc, but is given a direction bitset that identifies
     * whether the counters has to be incremented or decremented. See the dforz
     * macro for an example how to use dInc.
     */
    void dInc(tarch::la::Vector<DIMENSIONS,int>& counter, int max, LoopDirection& direction );

    /**
     * Element-wise comparison for the for loops.
     * @return true if all entries of counter are smaller max
     */
    int dCmp(const tarch::la::Vector<DIMENSIONS,int>& counter, int max);

    /**
     * Element-wise comparison for the loops.
     * @return true if all entries of counter are smaller than their corresponding
     * entries in max
     */
    int dCmp(const tarch::la::Vector<DIMENSIONS,int>& counter, const tarch::la::Vector<DIMENSIONS,int>& max);

    /**
     * compares two vectors with regards to their linearised value.
     *
     * @returns true, if dLinearised(counter, XXX) < dLinearised(max, XXX)
     */
    bool dCmpLinearOrder(const tarch::la::Vector<DIMENSIONS,int>& counter, const tarch::la::Vector<DIMENSIONS,int>& max);

    /**
     * This operation is called pretty often and, thus, might cause a significant
     * slowdown in the overall performance. Therefore, I introduced an aggressive
     * optimization based on lookup tables. This optimization is switched on if
     * DLOOP_AGGRESSIVE is specified (default in peano project). Two preconditions
     * have to be fulfilled in this case: All parameters have to stay within
     * certain boundaries (all positive, max smaller or equal to 5)
     * and one has to call both setupLookupTableForDLinearised() and
     * setupLookupTableForDDelinearised() before using dLinearised() or
     * dDelinearised().
     *
     * Obviously, creating a lookup table for these two operations is not that
     * simple, since the parameter space has to be mapped onto a unique key. To
     * end up with a simple mapping, all the constraints from above are added.
     * Although the mapping might be slow, it is still faster than computing the
     * partial sums of a to the power of b.
     *
     * @return the linearisation of the counter, i.e. the k-th component is
     * multiplied by max^k and the results are accumulated.
     */
    int dLinearised( const tarch::la::Vector<DIMENSIONS,int>& counter, int max );

    /**
     * Special 2d variant of dLinearised that works also if you compile with other
     * dimensions.
     */
    int d2Linearised( const tarch::la::Vector<2,int>& counter, int max );

    /**
     * Special 3d variant of dLinearised that works also if you compile with other
     * dimensions.
     */
    int d3Linearised( const tarch::la::Vector<3,int>& counter, int max );

    /**
     * Linearisation not Optimised
     *
     * This operation's semantics equals dLinearised, but the operation is not
     * optimised at all. It thus allows to have arbitrary argument values. Yet,
     * this version is not optimised, i.e. it might become a bottleneck.
     */
    int dLinearisedWithoutLookup( const tarch::la::Vector<DIMENSIONS,int>& counter, int max );

    /**
     * Counterpart of dLinearised(): maps a linearised index back to a
     * d-dimensional counter. Subject to the same lookup-table preconditions
     * as dLinearised().
     */
    tarch::la::Vector<DIMENSIONS,int> dDelinearised(int value, int max );

    /**
     * Delinearization not optimised: counterpart of dLinearisedWithoutLookup(),
     * accepts arbitrary argument values.
     */
    tarch::la::Vector<DIMENSIONS,int> dDelinearisedWithoutLookup(int value, int max);

    void setupLookupTableForDLinearised();
    void setupLookupTableForDDelinearised();

    /**
     * @return a vector containing zero values only.
     */
    tarch::la::Vector<DIMENSIONS,int> dStartVector();

    /**
     * @return a vector containing only zero values besides the dim-th entry. This
     * entry is set value.
     */
    tarch::la::Vector<DIMENSIONS,int> dStartVector(int dim, int value);

    /**
     * Creates a start vector. Each component is set either 0 or max-1 depending
     * on direction: If direction is true, then the value 0 is zero.
     *
     * @return a start vector for an oscillating loop.
     */
    tarch::la::Vector<DIMENSIONS,int> dStartVector( int max, const LoopDirection& direction );
  }
}

/**
 * Very often one needs a d-dimensional for loop. A d-dimensional for loop is
 * something like
 * \code
 * for (x(0)=0; x(0)<N; x(0)++)
 *   for (x(1)=0; x(1)<N; x(1)++)
 *     for (x(2)=0; x(2)<N; x(2)++)
 * \endcode
 * with d nested for loops. Thus, one has to code such loops for every d
 * manually. This macro offers a d-independent alternative, just write
 * \code
 * dfor (x,N) {
 *   ...
 * }
 * \endcode
 * The precompiler extracts this macro and within the loop body, you are able
 * to use the integer tinyvector x.
 *
 * Here is an example:
 * \code
 * dfor(a,2) {
 *   std::cout << a << ",";
 * }
 * \endcode
 * results in [0,0], [1,0], [0,1], [1,1] if DIMENSIONS equals 2. If DIMENSIONS
 * equals 3 the same construct gives you [0,0,0], [1,0,0], [0,1,0], [1,1,0],
 * [0,0,1], [1,0,1], [0,1,1], [1,1,1].
 */
#define dfor(counter,max) \
  for (tarch::la::Vector<DIMENSIONS,int> counter = peano::utils::dStartVector(); peano::utils::dCmp(counter,max); peano::utils::dInc(counter,max) )

/**
 * Shortcut for dfor(counter,4)
 *
 * The usage of this optimised shortcut differs from dfor: You have to
 * replace both the dfor and the opening bracket by this macro, i.e.
 *
 * \code
 * dfor(counter,4) {
 * \endcode
 *
 * becomes
 *
 * \code
 * dfor4(counter)
 * \endcode
 *
 * You usually use this macro with
 * \code
 * #pragma unroll(FOUR_POWER_D)
 * \endcode
 * or
 * \code
 * #pragma omp parallel for schedule(static)
 * \endcode
 *
 * If you work with this specialised version of dfor on a variable k, two
 * counter variables are available within the loop's scope. The variable k
 * itself with type tarch::la::Vector<DIMENSIONS,int>. Furthermore, there's always a variable
 * kScalar giving you k's value linearised.
 */
#define dfor4(counter) \
  for( int counter##Scalar=0; counter##Scalar<FOUR_POWER_D; counter##Scalar++) { \
    tarch::la::Vector<DIMENSIONS,int> counter; \
    { \
      int copy##counter##Scalar = counter##Scalar; \
      for (int counter##ddd=DIMENSIONS-1; counter##ddd>=0; counter##ddd--) { \
        int counter##aPowI = 1; \
        for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \
          counter##aPowI *= 4; \
        } \
        counter(counter##ddd)   = copy##counter##Scalar /  counter##aPowI; \
        copy##counter##Scalar  -= counter(counter##ddd) * counter##aPowI; \
      }}

/**
 * Shortcut for dfor(counter,3)
 *
 * The usage of this optimised shortcut differs from dfor: You have to
 * replace both the dfor and the opening bracket by this macro, i.e.
 *
 * \code
 * dfor(counter,3) {
 * \endcode
 *
 * becomes
 *
 * \code
 * dfor3(counter)
 * \endcode
 *
 * You usually use this macro with
 * \code
 * #pragma unroll(THREE_POWER_D)
 * \endcode
 * or
 * \code
 * #pragma omp parallel for schedule(static)
 * \endcode
 *
 * If you work with this specialised version of dfor on a variable k, two
 * counter variables are available within the loop's scope. The variable k
 * itself with type tarch::la::Vector<DIMENSIONS,int>. Furthermore, there's always a variable
 * kScalar giving you k's value linearised.
 */
#define dfor3(counter) \
  for( int counter##Scalar=0; counter##Scalar<THREE_POWER_D; counter##Scalar++) { \
    tarch::la::Vector<DIMENSIONS,int> counter; \
    { \
      int copy##counter##Scalar = counter##Scalar; \
      for (int counter##ddd=DIMENSIONS-1; counter##ddd>=0; counter##ddd--) { \
        int counter##aPowI = 1; \
        for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \
          counter##aPowI *= 3; \
        } \
        counter(counter##ddd)   = copy##counter##Scalar /  counter##aPowI; \
        copy##counter##Scalar  -= counter(counter##ddd) * counter##aPowI; \
      }}

/**
 * Shortcut for dfor(counter,5); usage analogous to dfor4/dfor3.
 */
#define dfor5(counter) \
  for( int counter##Scalar=0; counter##Scalar<FIVE_POWER_D; counter##Scalar++) { \
    tarch::la::Vector<DIMENSIONS,int> counter; \
    { \
      int copy##counter##Scalar = counter##Scalar; \
      for (int counter##ddd=DIMENSIONS-1; counter##ddd>=0; counter##ddd--) { \
        int counter##aPowI = 1; \
        for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \
          counter##aPowI *= 5; \
        } \
        counter(counter##ddd)   = copy##counter##Scalar /  counter##aPowI; \
        copy##counter##Scalar  -= counter(counter##ddd) * counter##aPowI; \
      }}

/**
 * Shortcut for dfor(counter,7); usage analogous to dfor4/dfor3.
 */
#define dfor7(counter) \
  for( int counter##Scalar=0; counter##Scalar<SEVEN_POWER_D; counter##Scalar++) { \
    tarch::la::Vector<DIMENSIONS,int> counter; \
    { \
      int copy##counter##Scalar = counter##Scalar; \
      for (int counter##ddd=DIMENSIONS-1; counter##ddd>=0; counter##ddd--) { \
        int counter##aPowI = 1; \
        for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \
          counter##aPowI *= 7; \
        } \
        counter(counter##ddd)   = copy##counter##Scalar /  counter##aPowI; \
        copy##counter##Scalar  -= counter(counter##ddd) * counter##aPowI; \
      }}

/**
 * Shortcut for dfor(counter,9); usage analogous to dfor4/dfor3.
 */
#define dfor9(counter) \
  for( int counter##Scalar=0; counter##Scalar<NINE_POWER_D; counter##Scalar++) { \
    tarch::la::Vector<DIMENSIONS,int> counter; \
    { \
      int copy##counter##Scalar = counter##Scalar; \
      for (int counter##ddd=DIMENSIONS-1; counter##ddd>=0; counter##ddd--) { \
        int counter##aPowI = 1; \
        for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \
counter##aPowI *= 9; \ } \ counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \ copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \ }} /** * If DIMENSIONS is not set to two, we might nevertheless need * two-dimensional loops. So this is the corresponding macro. It is * way slower than dfor if you compile with Dim2. * * Please use this macro with an enddforx macro closing your scope rather than * brackets. * * Please note that counterScalar is already a linearised version of your counter. * * Please note that you need a specialised linearisation function (depending on d * explicitly) to work with 2d index vectors within such a loop. Do not just use * dLinearised, but use the d2Linearised or d3Linearised variant instead. */ #define d2for(counter,max) \ for( int counter##Scalar=0; counter##Scalar<tarch::la::aPowI(2,max); counter##Scalar++) { \ tarch::la::Vector<2,int> counter; \ { \ int copy##counter##Scalar = counter##Scalar; \ for (int counter##ddd=2-1; counter##ddd>=0; counter##ddd--) { \ int counter##aPowI = 1; \ for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \ counter##aPowI *= max; \ } \ counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \ copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \ }} /** * If DIMENSIONS is not set to two, we might nevertheless need * two-dimensional loops. So this is the corresponding macro. * * Please use enddforx to close a loop started with this macro. 
*/ #define d2for2(counter) \ for( int counter##Scalar=0; counter##Scalar<tarch::la::aPowI(2,2); counter##Scalar++) { \ tarch::la::Vector<2,int> counter; \ { \ int copy##counter##Scalar = counter##Scalar; \ for (int counter##ddd=2-1; counter##ddd>=0; counter##ddd--) { \ int counter##aPowI = 1; \ for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \ counter##aPowI *= 2; \ } \ counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \ copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \ }} /** * If DIMENSIONS is not set to three, we might nevertheless need * three-dimensional loops. So this is the corresponding macro. * * Please use enddforx to close a loop started with this macro. */ #define d3for2(counter) \ for( int counter##Scalar=0; counter##Scalar<tarch::la::aPowI(3,2); counter##Scalar++) { \ tarch::la::Vector<3,int> counter; \ { \ int copy##counter##Scalar = counter##Scalar; \ for (int counter##ddd=3-1; counter##ddd>=0; counter##ddd--) { \ int counter##aPowI = 1; \ for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \ counter##aPowI *= 2; \ } \ counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \ copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \ }} /** * If DIMENSIONS is not set to three, we might nevertheless need * two-dimensional loops. So this is the corresponding macro. It is * way slower than dfor if you compile with Dim2. * * Please use this macro with an enddforx macro closing your scope rather than * brackets. * * Please note that counterScalar is already a linearised version of your counter. * * Please note that you need a specialised linearisation function (depending on d * explicitly) to work with 2d index vectors within such a loop. Do not just use * dLinearised, but use the d2Linearised or d3Linearised variant instead. 
*/ #define d3for(counter,max) \ for( int counter##Scalar=0; counter##Scalar<tarch::la::aPowI(3,max); counter##Scalar++) { \ tarch::la::Vector<3,int> counter; \ { \ int copy##counter##Scalar = counter##Scalar; \ for (int counter##ddd=3-1; counter##ddd>=0; counter##ddd--) { \ int counter##aPowI = 1; \ for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \ counter##aPowI *= max; \ } \ counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \ copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \ }} /** * If DIMENSIONS is not set to two, we might nevertheless need * two-dimensional loops. So this is the corresponding macro. * * Please use enddforx to close a loop started with this macro. */ #define d2for3(counter) \ for( int counter##Scalar=0; counter##Scalar<tarch::la::aPowI(2,3); counter##Scalar++) { \ tarch::la::Vector<2,int> counter; \ { \ int copy##counter##Scalar = counter##Scalar; \ for (int counter##ddd=2-1; counter##ddd>=0; counter##ddd--) { \ int counter##aPowI = 1; \ for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \ counter##aPowI *= 3; \ } \ counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \ copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \ }} /** * If DIMENSIONS is not set to three, we might nevertheless need * three-dimensional loops. So this is the corresponding macro. * * Please use enddforx to close a loop started with this macro. 
*/ #define d3for3(counter) \ for( int counter##Scalar=0; counter##Scalar<tarch::la::aPowI(3,3); counter##Scalar++) { \ tarch::la::Vector<3,int> counter; \ { \ int copy##counter##Scalar = counter##Scalar; \ for (int counter##ddd=3-1; counter##ddd>=0; counter##ddd--) { \ int counter##aPowI = 1; \ for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \ counter##aPowI *= 3; \ } \ counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \ copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \ }} /** * Shortcut For dfor(counter,2) * * The usage of this optimised shortcut differs from dfor: You have to * replace both the dfor and the opening bracket by this macro, i.e. * * \code * dfor(counter,2) { * \endcode * * becomes * * \code * dfor2(counter) * \endcode * * You usually use this macro with * \code * #pragma unroll(TWO_POWER_D) * \endcode * or * \code * #pragma omp parallel for schedule(static) * \endcode * * If you work with this specialised version of dfor on a variable k, two * counter variables are available within the loop's scope. The variable k * itself with type tarch::la::Vector<DIMENSIONS,int>. Furthermore, there's always a variable * kScalar giving you k's value linearised. 
*/ /* * bit flipping used for DIMENSIONS = 2, and DIMENSIONS = 3 * for more information about the idea principle used refer to https://opt-patterns.wiki.tum.de/dfor */ #if DIMENSIONS == 2 #define dfor2(counter) \ for( int counter##Scalar=0, AA##counter = 0, BB##counter = 0; counter##Scalar<TWO_POWER_D; counter##Scalar++) { \ tarch::la::Vector<DIMENSIONS,int> counter; \ counter(0) = AA##counter; \ counter(1) = BB##counter; \ AA##counter = !AA##counter; \ BB##counter = !(AA##counter ^ BB##counter); #elif DIMENSIONS == 3 #define dfor2(counter) \ for( int counter##Scalar=0, AA##counter = 0, BB##counter = 0, CC##counter = 0; counter##Scalar<TWO_POWER_D; counter##Scalar++) { \ tarch::la::Vector<DIMENSIONS,int> counter; \ counter(0) = AA##counter; \ counter(1) = BB##counter; \ counter(2) = CC##counter; \ AA##counter = !AA##counter; \ BB##counter = !(AA##counter ^ BB##counter); \ CC##counter = CC##counter || (!AA##counter && !BB##counter && !CC##counter); #else #define dfor2(counter) \ for( int counter##Scalar=0; counter##Scalar<TWO_POWER_D; counter##Scalar++) { \ tarch::la::Vector<DIMENSIONS,int> counter; \ { \ int copy##counter##Scalar = counter##Scalar; \ for (int counter##ddd=DIMENSIONS-1; counter##ddd>=0; counter##ddd--) { \ int counter##aPowI = 1; \ for (int counter##jjj=0; counter##jjj<counter##ddd; counter##jjj++) { \ counter##aPowI *= 2; \ } \ counter(counter##ddd) = copy##counter##Scalar / counter##aPowI; \ copy##counter##Scalar -= counter(counter##ddd) * counter##aPowI; \ }} #endif /** * I prefer to use this macro for dforx instead of a closing bracket as many * syntax parser fail otherwise. */ #define enddforx } /** * This is an exclusive d-dimensional for loop. Exclusive means, there is one * dimension that is not manipulated during the for loop. This dimension * (entry of the counter) is specified by dim and has the value value * throughout the for-loop. 
*/ #define dfore(counter,max,dim,value) \ for (tarch::la::Vector<DIMENSIONS,int> counter = peano::utils::dStartVector(dim,value); peano::utils::dCmp(counter,max); peano::utils::dInc(counter,max,dim) ) /** * This is a d-dimensional z-loop. A z-loop is a d-dimensional loop the * counter direction changes everytime an inner loop direction has changed. * So this is the loop corresponding to a Peano curve. The for loop is passed * a counter name, the number of steps to perform in each direction and a * direction flag that identifies the initial direction. Note that this * argument has to be a real variable, it might not be a constant. The * direction flag array identifies for each direction, whether the initial * loop goes along the axis or not. The type of direction is LoopDirection. * * Here are some examples for two dimensions: * \code * LoopDirection d(3); // equals {true,true} and identifies the standard * // Peano Leitmotiv * zfor( a, 3, d ) { * std::cout << a; * } * \endcode * yields in [0,0],[1,0],[2,0],[2,1],[1,1],[0,1],[0,2],[1,2],[2,2]. * * \code * LoopDirection d(1); // equals {true, false} and specifies a Peano curve * // from the left top to right bottom * zfor( a, 3, d ) { * std::cout << a; * } * \endcode * yields in [0,2],[1,2],[2,2],[2,1],[1,1],[0,1],[0,0],[1,0],[2,0]. 
*/ #define zfor(counter,max,direction) \ {for (tarch::la::Vector<DIMENSIONS,int> counter = peano::utils::dStartVector(max,direction); peano::utils::dCmp(counter,max); peano::utils::dInc(counter,max,direction) ) { /* * zfor3 is an optimized version of zfor for max = 3 * A lookup table is used for dim=2 and dim=3, for higher dimensions * the standard zfor is used instead */ #if DIMENSIONS == 2 static const int lookupzfor[4][9][2] = { {{2,2},{1,2},{0,2},{0,1},{1,1},{2,1},{2,0},{1,0},{0,0}}, {{0,2},{1,2},{2,2},{2,1},{1,1},{0,1},{0,0},{1,0},{2,0}}, {{2,0},{1,0},{0,0},{0,1},{1,1},{2,1},{2,2},{1,2},{0,2}}, {{0,0},{1,0},{2,0},{2,1},{1,1},{0,1},{0,2},{1,2},{2,2}} }; #define zfor3(counter, direction) \ { tarch::la::Vector<DIMENSIONS,int> counter; \ int counter##initDir = static_cast<int>(direction.to_ulong()); \ for (int counter##i = 0; counter##i < 9; ++counter##i) { \ counter(0) = lookupzfor[counter##initDir][counter##i][0]; \ counter(1) = lookupzfor[counter##initDir][counter##i][1]; #elif DIMENSIONS == 3 static const int lookupzfor[8][27][3] = { {{2,2,2},{1,2,2},{0,2,2},{0,1,2},{1,1,2},{2,1,2},{2,0,2},{1,0,2},{0,0,2},{0,0,1},{1,0,1},{2,0,1},{2,1,1},{1,1,1},{0,1,1},{0,2,1},{1,2,1},{2,2,1},{2,2,0},{1,2,0},{0,2,0},{0,1,0},{1,1,0},{2,1,0},{2,0,0},{1,0,0},{0,0,0}}, {{0,2,2},{1,2,2},{2,2,2},{2,1,2},{1,1,2},{0,1,2},{0,0,2},{1,0,2},{2,0,2},{2,0,1},{1,0,1},{0,0,1},{0,1,1},{1,1,1},{2,1,1},{2,2,1},{1,2,1},{0,2,1},{0,2,0},{1,2,0},{2,2,0},{2,1,0},{1,1,0},{0,1,0},{0,0,0},{1,0,0},{2,0,0}}, {{2,0,2},{1,0,2},{0,0,2},{0,1,2},{1,1,2},{2,1,2},{2,2,2},{1,2,2},{0,2,2},{0,2,1},{1,2,1},{2,2,1},{2,1,1},{1,1,1},{0,1,1},{0,0,1},{1,0,1},{2,0,1},{2,0,0},{1,0,0},{0,0,0},{0,1,0},{1,1,0},{2,1,0},{2,2,0},{1,2,0},{0,2,0}}, {{0,0,2},{1,0,2},{2,0,2},{2,1,2},{1,1,2},{0,1,2},{0,2,2},{1,2,2},{2,2,2},{2,2,1},{1,2,1},{0,2,1},{0,1,1},{1,1,1},{2,1,1},{2,0,1},{1,0,1},{0,0,1},{0,0,0},{1,0,0},{2,0,0},{2,1,0},{1,1,0},{0,1,0},{0,2,0},{1,2,0},{2,2,0}}, 
{{2,2,0},{1,2,0},{0,2,0},{0,1,0},{1,1,0},{2,1,0},{2,0,0},{1,0,0},{0,0,0},{0,0,1},{1,0,1},{2,0,1},{2,1,1},{1,1,1},{0,1,1},{0,2,1},{1,2,1},{2,2,1},{2,2,2},{1,2,2},{0,2,2},{0,1,2},{1,1,2},{2,1,2},{2,0,2},{1,0,2},{0,0,2}}, {{0,2,0},{1,2,0},{2,2,0},{2,1,0},{1,1,0},{0,1,0},{0,0,0},{1,0,0},{2,0,0},{2,0,1},{1,0,1},{0,0,1},{0,1,1},{1,1,1},{2,1,1},{2,2,1},{1,2,1},{0,2,1},{0,2,2},{1,2,2},{2,2,2},{2,1,2},{1,1,2},{0,1,2},{0,0,2},{1,0,2},{2,0,2}}, {{2,0,0},{1,0,0},{0,0,0},{0,1,0},{1,1,0},{2,1,0},{2,2,0},{1,2,0},{0,2,0},{0,2,1},{1,2,1},{2,2,1},{2,1,1},{1,1,1},{0,1,1},{0,0,1},{1,0,1},{2,0,1},{2,0,2},{1,0,2},{0,0,2},{0,1,2},{1,1,2},{2,1,2},{2,2,2},{1,2,2},{0,2,2}}, {{0,0,0},{1,0,0},{2,0,0},{2,1,0},{1,1,0},{0,1,0},{0,2,0},{1,2,0},{2,2,0},{2,2,1},{1,2,1},{0,2,1},{0,1,1},{1,1,1},{2,1,1},{2,0,1},{1,0,1},{0,0,1},{0,0,2},{1,0,2},{2,0,2},{2,1,2},{1,1,2},{0,1,2},{0,2,2},{1,2,2},{2,2,2}} }; #define zfor3(counter, direction) \ { tarch::la::Vector<DIMENSIONS,int> counter; \ int counter##initDir = static_cast<int>(direction.to_ulong()); \ for (int counter##i = 0; counter##i < 27; ++counter##i) { \ counter(0) = lookupzfor[counter##initDir][counter##i][0]; \ counter(1) = lookupzfor[counter##initDir][counter##i][1]; \ counter(2) = lookupzfor[counter##initDir][counter##i][2]; #else #define zfor3(counter, direction) \ zfor(counter, 3, direction) #endif #define endzfor }} #endif
area.c
#include <stdio.h>
#include <math.h>

/* Integration bounds: area under sin(x) on [LEFT, RIGHT] ~ [0, pi]. */
#define LEFT 0.0
#define RIGHT 3.1416
#define SLICES 100000000
#define WIDTH ((RIGHT - LEFT) / SLICES)

/*
 * Riemann-sum approximation of the integral of sin(x) over [LEFT, RIGHT].
 *
 * Fixes over the original:
 *  - The per-iteration "#pragma omp critical" serialized every addition,
 *    destroying the benefit of the parallel loop. An OpenMP
 *    reduction(+ : area) clause gives each thread a private accumulator
 *    and combines them once at the end of the loop.
 *  - LEFT/RIGHT were float literals (0.0F / 3.1416F) feeding an otherwise
 *    double-precision computation; plain double literals avoid the
 *    needless narrowing.
 *  - `area` was initialized with the float literal 0.0f for a double.
 *  - The temporary `s` (and its private(s) clause) is no longer needed.
 *
 * Prints the computed area to stdout and returns 0.
 */
int main(void)
{
    double area = 0.0;

    /* Each thread accumulates into its own private copy of `area`;
     * OpenMP sums the per-thread copies when the loop finishes, so no
     * critical section is required. */
    #pragma omp parallel for reduction(+ : area)
    for (int i = 0; i < SLICES; ++i) {
        area += sin(LEFT + i * WIDTH) * WIDTH;
    }

    printf("area = %f\n", area);
    return 0;
}
builder.h
// Copyright (c) 2015, The Regents of the University of California (Regents) // See LICENSE.txt for license details #ifndef BUILDER_H_ #define BUILDER_H_ #include <algorithm> #include <cinttypes> #include <fstream> #include <functional> #include <type_traits> #include <utility> #include "timer.h" #include "command_line.h" #include "generator.h" #include "graph.h" #include "platform_atomics.h" #include "pvector.h" #include "reader.h" #include "mgraph.h" /* GAP Benchmark Suite Class: BuilderBase Author: Scott Beamer Given arguements from the command line (cli), returns a built graph - MakeGraph() will parse cli and obtain edgelist and call MakeGraphFromEL(edgelist) to perform actual graph construction - edgelist can be from file (reader) or synthetically generated (generator) - Common case: BuilderBase typedef'd (w/ params) to be Builder (benchmark.h) */ template <typename VertexID_, typename DestID_ = VertexID_, typename WeightT_ = VertexID_, bool invert = true> class BuilderBase { typedef EdgePair<VertexID_, DestID_> Edge; typedef pvector<Edge> EdgeList; const CLBase &cli_; bool symmetrize_; bool needs_weights_; int64_t num_vertices_ = -1; public: explicit BuilderBase(const CLBase &cli) : cli_(cli) { symmetrize_ = cli_.symmetrize(); if (symmetrize_) printf("Building a symmetrized graph\n"); needs_weights_ = !std::is_same<VertexID_, DestID_>::value; } DestID_ GetSource(EdgePair<VertexID_, VertexID_> e) { return e.u; } DestID_ GetSource(EdgePair<VertexID_, VertexWeight<VertexID_, WeightT_>> e) { return VertexWeight<VertexID_, WeightT_>(e.u, e.v.w); } VertexID_ FindMaxVertexID(const EdgeList &el) { VertexID_ max_seen = 0; #pragma omp parallel for reduction(max : max_seen) for (auto it = el.begin(); it < el.end(); it++) { Edge e = *it; max_seen = std::max(max_seen, e.u); max_seen = std::max(max_seen, (VertexID_) e.v); } return max_seen; } pvector<VertexID_> CountDegrees(const EdgeList &el, bool transpose) { pvector<VertexID_> degrees(num_vertices_, 0); #pragma omp 
parallel for for (auto it = el.begin(); it < el.end(); it++) { Edge e = *it; if (symmetrize_ || (!symmetrize_ && !transpose)) fetch_and_add(degrees[e.u], 1); if (symmetrize_ || (!symmetrize_ && transpose)) fetch_and_add(degrees[(VertexID_) e.v], 1); } return degrees; } static pvector<SGOffset> PrefixSum(const pvector<VertexID_> &degrees) { pvector<SGOffset> sums(degrees.size() + 1); SGOffset total = 0; for (size_t n=0; n < degrees.size(); n++) { sums[n] = total; total += degrees[n]; } sums[degrees.size()] = total; return sums; } static pvector<SGOffset> ParallelPrefixSum(const pvector<VertexID_> &degrees) { const size_t block_size = 1<<20; const size_t num_blocks = (degrees.size() + block_size - 1) / block_size; pvector<SGOffset> local_sums(num_blocks); #pragma omp parallel for for (size_t block=0; block < num_blocks; block++) { SGOffset lsum = 0; size_t block_end = std::min((block + 1) * block_size, degrees.size()); for (size_t i=block * block_size; i < block_end; i++) lsum += degrees[i]; local_sums[block] = lsum; } pvector<SGOffset> bulk_prefix(num_blocks+1); SGOffset total = 0; for (size_t block=0; block < num_blocks; block++) { bulk_prefix[block] = total; total += local_sums[block]; } bulk_prefix[num_blocks] = total; pvector<SGOffset> prefix(degrees.size() + 1); #pragma omp parallel for for (size_t block=0; block < num_blocks; block++) { SGOffset local_total = bulk_prefix[block]; size_t block_end = std::min((block + 1) * block_size, degrees.size()); for (size_t i=block * block_size; i < block_end; i++) { prefix[i] = local_total; local_total += degrees[i]; } } prefix[degrees.size()] = bulk_prefix[num_blocks]; return prefix; } // Removes self-loops and redundant edges // Side effect: neighbor IDs will be sorted void OldSquishCSR(const CSRGraph<VertexID_, DestID_, invert> &g, bool transpose, DestID_*** sq_index, DestID_** sq_neighs) { pvector<VertexID_> diffs(g.num_vertices()); DestID_ *n_start, *n_end; #pragma omp parallel for private(n_start, n_end) for 
(VertexID_ n=0; n < g.num_vertices(); n++) { if (transpose) { n_start = g.in_neigh(n).begin(); n_end = g.in_neigh(n).end(); } else { n_start = g.out_neigh(n).begin(); n_end = g.out_neigh(n).end(); } std::sort(n_start, n_end); DestID_ *new_end = std::unique(n_start, n_end); new_end = std::remove(n_start, new_end, n); diffs[n] = new_end - n_start; } pvector<SGOffset> sq_offsets = ParallelPrefixSum(diffs); *sq_neighs = new DestID_[sq_offsets[g.num_vertices()]]; *sq_index = GenIndex<VertexID_, DestID_>(sq_offsets, *sq_neighs); //*sq_index = CSRGraph<VertexID_, DestID_>::GenIndex(sq_offsets, *sq_neighs); #pragma omp parallel for private(n_start) for (VertexID_ n=0; n < g.num_vertices(); n++) { if (transpose) n_start = g.in_neigh(n).begin(); else n_start = g.out_neigh(n).begin(); std::copy(n_start, n_start+diffs[n], (*sq_index)[n]); } } void SquishCSR(const CSRGraph<VertexID_, DestID_, invert> &g, bool transpose, int** sq_rowptr, DestID_*** sq_index, DestID_** sq_neighs) { pvector<VertexID_> diffs(g.num_vertices()); DestID_ *n_start, *n_end; #pragma omp parallel for private(n_start, n_end) for (VertexID_ n=0; n < g.num_vertices(); n++) { if (transpose) { n_start = g.in_neigh(n).begin(); n_end = g.in_neigh(n).end(); } else { n_start = g.out_neigh(n).begin(); n_end = g.out_neigh(n).end(); } std::sort(n_start, n_end); DestID_ *new_end = std::unique(n_start, n_end); new_end = std::remove(n_start, new_end, n); diffs[n] = new_end - n_start; } pvector<SGOffset> sq_offsets = ParallelPrefixSum(diffs); *sq_neighs = new DestID_[sq_offsets[g.num_vertices()]]; *sq_rowptr = new DestID_[num_vertices_+1]; *sq_index = GenIndex<VertexID_, DestID_>(sq_offsets, *sq_neighs); //*sq_index = CSRGraph<VertexID_, DestID_>::GenIndex(sq_offsets, *sq_neighs); for (int i = 0; i < num_vertices_+1; i ++) (*sq_rowptr)[i] = sq_offsets[i]; #pragma omp parallel for private(n_start) for (VertexID_ n=0; n < g.num_vertices(); n++) { if (transpose) n_start = g.in_neigh(n).begin(); else n_start = 
g.out_neigh(n).begin(); std::copy(n_start, n_start+diffs[n], (*sq_index)[n]); } } void SquishGraph(CSRGraph<VertexID_, DestID_, invert>& new_g, const CSRGraph<VertexID_, DestID_, invert> &g) { DestID_ **out_index, *out_neighs, **in_index, *in_neighs; int *in_rowptr, *out_rowptr; int m = g.num_vertices(); int nnz = g.num_edges(); printf("Before cleaning: num_vertices %d num_edges %d\n", m, nnz); SquishCSR(g, false, &out_rowptr, &out_index, &out_neighs); if (g.directed()) { if (invert) SquishCSR(g, true, &in_rowptr, &in_index, &in_neighs); new_g.Setup(g.num_vertices(), out_rowptr, out_index, out_neighs, in_rowptr, in_index, in_neighs); } else new_g.Setup(g.num_vertices(), out_rowptr, out_index, out_neighs); } /* Graph Bulding Steps (for CSR): - Read edgelist once to determine vertex degrees (CountDegrees) - Determine vertex offsets by a prefix sum (ParallelPrefixSum) - Allocate storage and set points according to offsets (GenIndex) - Copy edges into storage */ void OldMakeCSR(const EdgeList &el, bool transpose, DestID_*** index, DestID_** neighs) { pvector<VertexID_> degrees = CountDegrees(el, transpose); pvector<SGOffset> offsets = ParallelPrefixSum(degrees); *neighs = new DestID_[offsets[num_vertices_]]; *index = GenIndex<VertexID_, DestID_>(offsets, *neighs); //*index = CSRGraph<VertexID_, DestID_>::GenIndex(offsets, *neighs); #pragma omp parallel for for (auto it = el.begin(); it < el.end(); it++) { Edge e = *it; if (symmetrize_ || (!symmetrize_ && !transpose)) (*neighs)[fetch_and_add(offsets[e.u], 1)] = e.v; if (symmetrize_ || (!symmetrize_ && transpose)) (*neighs)[fetch_and_add(offsets[static_cast<VertexID_>(e.v)], 1)] = GetSource(e); } } void MakeCSR(const EdgeList &el, bool transpose, int** rowptr, DestID_*** index, DestID_** neighs) { pvector<VertexID_> degrees = CountDegrees(el, transpose); pvector<SGOffset> offsets = ParallelPrefixSum(degrees); *neighs = new DestID_[offsets[num_vertices_]]; *index = GenIndex<VertexID_, DestID_>(offsets, *neighs); //*index 
= CSRGraph<VertexID_, DestID_>::GenIndex(offsets, *neighs); *rowptr = new int[num_vertices_+1]; for (int i = 0; i < num_vertices_+1; i ++) (*rowptr)[i] = offsets[i]; #pragma omp parallel for for (auto it = el.begin(); it < el.end(); it++) { Edge e = *it; if (symmetrize_ || (!symmetrize_ && !transpose)) (*neighs)[fetch_and_add(offsets[e.u], 1)] = e.v; if (symmetrize_ || (!symmetrize_ && transpose)) (*neighs)[fetch_and_add(offsets[static_cast<VertexID_>(e.v)], 1)] = GetSource(e); } } void MakeGraphFromEL(EdgeList &el, CSRGraph<VertexID_, DestID_, invert> &g, bool use_dag = false) { int *rowptr = nullptr, *inv_rowptr = nullptr; DestID_ **index = nullptr, **inv_index = nullptr; DestID_ *neighs = nullptr, *inv_neighs = nullptr; Timer t; t.Start(); if (num_vertices_ == -1) num_vertices_ = FindMaxVertexID(el)+1; if (needs_weights_) Generator<VertexID_, DestID_, WeightT_>::InsertWeights(el); MakeCSR(el, false, &rowptr, &index, &neighs); if (!symmetrize_ && invert) MakeCSR(el, true, &inv_rowptr, &inv_index, &inv_neighs); t.Stop(); PrintTime("Build Time", t.Seconds()); if (symmetrize_) g.Setup(num_vertices_, rowptr, index, neighs); else g.Setup(num_vertices_, rowptr, index, neighs, inv_rowptr, inv_index, inv_neighs); } void MakeGraph(CSRGraph<VertexID_, DestID_, invert>& new_g, bool use_dag = false) { CSRGraph<VertexID_, DestID_, invert> g; { // extra scope to trigger earlier deletion of el (save memory) EdgeList el; if (cli_.filename() != "") { Reader<VertexID_, DestID_, WeightT_, invert> r(cli_.filename()); if ((r.GetSuffix() == ".sg") || (r.GetSuffix() == ".wsg")) r.ReadSerializedGraph(g); else el = r.ReadFile(needs_weights_); } else if (cli_.scale() != -1) { Generator<VertexID_, DestID_> gen(cli_.scale(), cli_.degree()); el = gen.GenerateEL(cli_.uniform()); } MakeGraphFromEL(el, g, use_dag); } SquishGraph(new_g, g); } // Relabels (and rebuilds) graph by order of decreasing degree static CSRGraph<VertexID_, DestID_, invert> RelabelByDegree( const CSRGraph<VertexID_, 
DestID_, invert> &g) { if (g.directed()) { std::cout << "Cannot relabel directed graph" << std::endl; std::exit(-11); } Timer t; t.Start(); typedef std::pair<int64_t, VertexID_> degree_node_p; pvector<degree_node_p> degree_id_pairs(g.num_vertices()); #pragma omp parallel for for (VertexID_ n=0; n < g.num_vertices(); n++) degree_id_pairs[n] = std::make_pair(g.out_degree(n), n); std::sort(degree_id_pairs.begin(), degree_id_pairs.end(), std::greater<degree_node_p>()); pvector<VertexID_> degrees(g.num_vertices()); pvector<VertexID_> new_ids(g.num_vertices()); #pragma omp parallel for for (VertexID_ n=0; n < g.num_vertices(); n++) { degrees[n] = degree_id_pairs[n].first; new_ids[degree_id_pairs[n].second] = n; } pvector<SGOffset> offsets = ParallelPrefixSum(degrees); DestID_* neighs = new DestID_[offsets[g.num_vertices()]]; DestID_** index = GenIndex<VertexID_, DestID_>(offsets, neighs); //DestID_** index = CSRGraph<VertexID_, DestID_>::GenIndex(offsets, neighs); #pragma omp parallel for for (VertexID_ u=0; u < g.num_vertices(); u++) { for (VertexID_ v : g.out_neigh(u)) neighs[offsets[new_ids[u]]++] = new_ids[v]; std::sort(index[new_ids[u]], index[new_ids[u]+1]); } t.Stop(); PrintTime("Relabel", t.Seconds()); return CSRGraph<VertexID_, DestID_, invert>(g.num_vertices(), index, neighs); } }; typedef BuilderBase<VertexID, VertexID, WeightT> Builder; typedef BuilderBase<VertexID, WVertex, WeightT> WeightedBuilder; #endif // BUILDER_H_
rfw_random.h
/* Algorithm for Steiner Problem in Graphs Copyright (c) Microsoft Corporation All rights reserved. MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #pragma once /* C++ Wrapper written by Microsoft Corporation on original C code by Nishimura and Matsumoto. Original header follows. */ /* A C-program for MT19937, with initialization improved 2002/1/26. Coded by Takuji Nishimura and Makoto Matsumoto. Before using, initialize the state by using init_genrand(seed) or init_by_array(init_key, key_length). Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of its contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Any feedback is very welcome. 
http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space) */

#ifndef RFW_RANDOM_H
#define RFW_RANDOM_H

#include <cstdlib>
#include <cstdio>

/*
 * RFWRandom: header-only wrapper around the MT19937 Mersenne Twister
 * pseudo-random generator of Matsumoto & Nishimura.  All state is
 * *static* (class-wide), so every caller in the program shares one
 * generator stream.  The static data members (maxvalue, MATRIX_A,
 * UPPER_MASK, LOWER_MASK, mt[], mti) are declared here and defined in
 * a separate translation unit not visible in this file.
 *
 * NOTE(review): shared static state is not thread-safe — concurrent
 * calls to getRand() from multiple threads race on mt[]/mti; confirm
 * callers serialize access (RFWLocalRandom below exists for per-thread
 * use).
 */
class RFWRandom {
	public:
		/* Largest value genrand_int32() can return (presumably 0xffffffff
		   — defined elsewhere; TODO confirm). */
		static const unsigned long int maxvalue;

		/* Period parameters */
		enum {N=624, M=397};
		static const unsigned long int MATRIX_A;   /* constant vector a */
		static const unsigned long int UPPER_MASK; /* most significant w-r bits */
		static const unsigned long int LOWER_MASK; /* least significant r bits */

	private:
		static unsigned long mt[N]; /* the array for the state vector */
		static int mti;             /* mti==N+1 means mt[N] is not initialized */

		/* initializes mt[N] with a seed */
		static void init_genrand(unsigned long s) {
			mt[0]= s & 0xffffffffUL;
			for (mti=1; mti<N; mti++) {
				mt[mti] = (1812433253UL * (mt[mti-1] ^ (mt[mti-1] >> 30)) + mti);
				/* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
				/* In the previous versions, MSBs of the seed affect   */
				/* only MSBs of the array mt[].                        */
				/* 2002/01/09 modified by Makoto Matsumoto             */
				mt[mti] &= 0xffffffffUL; /* for >32 bit machines */
			}
		}

		/* generates a random number on [0,0xffffffff]-interval */
		static unsigned long genrand_int32(void) {
			unsigned long y;
			static unsigned long mag01[2]={0x0UL, MATRIX_A};
			/* mag01[x] = x * MATRIX_A  for x=0,1 */

			if (mti >= N) { /* generate N words at one time */
				int kk;

				if (mti == N+1)           /* if init_genrand() has not been called, */
					init_genrand(5489UL); /* a default initial seed is used */

				for (kk=0;kk<N-M;kk++) {
					y = (mt[kk]&UPPER_MASK)|(mt[kk+1]&LOWER_MASK);
					mt[kk] = mt[kk+M] ^ (y >> 1) ^ mag01[y & 0x1UL];
				}
				for (;kk<N-1;kk++) {
					y = (mt[kk]&UPPER_MASK)|(mt[kk+1]&LOWER_MASK);
					mt[kk] = mt[kk+(M-N)] ^ (y >> 1) ^ mag01[y & 0x1UL];
				}
				y = (mt[N-1]&UPPER_MASK)|(mt[0]&LOWER_MASK);
				mt[N-1] = mt[M-1] ^ (y >> 1) ^ mag01[y & 0x1UL];

				mti = 0;
			}

			y = mt[mti++];

			/* Tempering */
			y ^= (y >> 11);
			y ^= (y << 7) & 0x9d2c5680UL;
			y ^= (y << 15) & 0xefc60000UL;
			y ^= (y >> 18);

			return y;
		}

	public:
		//constructors
		//NOTE(review): constructing an instance re-seeds the *shared*
		//static stream with seed 1 — every new RFWRandom resets the
		//sequence for all users.
		RFWRandom () {randomize(1);}

		//randomize procedures (seed 0 is remapped to 1 — an all-zero
		//state would be degenerate for MT)
		static void randomize (unsigned long s) {
			if (s==0) s = 1;
			init_genrand(s);
		}

		static unsigned long getRand() {return genrand_int32();}

		//pick an integer uniformly at random between inf and sup (both inclusive)
		//Uses rejection sampling so the modulo below introduces no bias.
		static int getInteger (int inf, int sup) {
			if (sup<=inf) return inf;
			unsigned long range, minallowed, u;
			range = (unsigned long)(sup-inf+1);  //number of values allowed
			minallowed = (maxvalue % range) + 1; //restrict search space to avoid small numbers
			if (minallowed==range) minallowed = 0;
			do {u = getRand();}                  //repeat until a good number is found
			while (u < minallowed);
			return (inf + (int)(u % range));     //return a number in the range
		}

		static float getFloat () {return (float)getDouble();}                                       //get a float number in [0;1]
		static double getDouble() {return getDoubleClosed();}                                       //double in the range [0;1]
		static double getDoubleClosed() {return ((double)getRand()/(double)maxvalue);}              //double in the range [0;1]
		static double getDoubleOpen() {return ((double)getRand()/((double)(maxvalue)+1.0));}        //double in the range [0;1)
		static bool getBool () {return (getRand() & 1);}
};

/*
 * RFWLocalRandom: same MT19937 algorithm, but with *per-instance*
 * state (mt[]/mti are non-static members), so each object owns an
 * independent stream.  Reuses RFWRandom's period constants.  Suitable
 * for per-thread generators, except that the Randomize()/
 * CriticalRandomize() convenience seeders draw the seed from the
 * shared RFWRandom stream (see notes below).
 */
class RFWLocalRandom {
	private:
		//static const unsigned long int maxvalue;
		/* Period parameters */
		//enum {N=624, M=397};
		//static const unsigned long int MATRIX_A;   /* constant vector a */
		//static const unsigned long int UPPER_MASK; /* most significant w-r bits */
		//static const unsigned long int LOWER_MASK; /* least significant r bits */

		unsigned long mt[RFWRandom::N]; /* the array for the state vector */
		int mti;                        /* mti==N+1 means mt[N] is not initialized */

		/* initializes mt[N] with a seed */
		void init_genrand(unsigned long s) {
			mt[0]= s & 0xffffffffUL;
			for (mti=1; mti<RFWRandom::N; mti++) {
				mt[mti] = (1812433253UL * (mt[mti-1] ^ (mt[mti-1] >> 30)) + mti);
				/* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
				/* In the previous versions, MSBs of the seed affect   */
				/* only MSBs of the array mt[].                        */
				/* 2002/01/09 modified by Makoto Matsumoto             */
				mt[mti] &= 0xffffffffUL; /* for >32 bit machines */
			}
		}

		/* generates a random number on [0,0xffffffff]-interval */
		unsigned long genrand_int32(void) {
			unsigned long y;
			unsigned long mag01[2]={0x0UL, RFWRandom::MATRIX_A};
			/* mag01[x] = x * MATRIX_A  for x=0,1 */

			if (mti >= RFWRandom::N) { /* generate N words at one time */
				int kk;

				if (mti == RFWRandom::N+1)   /* if init_genrand() has not been called, */
					init_genrand(5489UL);    /* a default initial seed is used */

				for (kk=0;kk<RFWRandom::N-RFWRandom::M;kk++) {
					y = (mt[kk]&RFWRandom::UPPER_MASK)|(mt[kk+1]&RFWRandom::LOWER_MASK);
					mt[kk] = mt[kk+RFWRandom::M] ^ (y >> 1) ^ mag01[y & 0x1UL];
				}
				for (;kk<RFWRandom::N-1;kk++) {
					y = (mt[kk]&RFWRandom::UPPER_MASK)|(mt[kk+1]&RFWRandom::LOWER_MASK);
					mt[kk] = mt[kk+(RFWRandom::M-RFWRandom::N)] ^ (y >> 1) ^ mag01[y & 0x1UL];
				}
				y = (mt[RFWRandom::N-1]&RFWRandom::UPPER_MASK)|(mt[0]&RFWRandom::LOWER_MASK);
				mt[RFWRandom::N-1] = mt[RFWRandom::M-1] ^ (y >> 1) ^ mag01[y & 0x1UL];

				mti = 0;
			}

			y = mt[mti++];

			/* Tempering */
			y ^= (y >> 11);
			y ^= (y << 7) & 0x9d2c5680UL;
			y ^= (y << 15) & 0xefc60000UL;
			y ^= (y >> 18);

			return y;
		}

	public:
		//constructors
		RFWLocalRandom () {Randomize(1);}
		RFWLocalRandom (unsigned long s) {Randomize(s);}

		//Seed this instance from the global stream.  The omp critical
		//guard around the shared-state access is currently commented out
		//— NOTE(review): racy if called from multiple threads; confirm.
		void CriticalRandomize() {
			//#pragma omp critical
			{
				//fprintf (stderr, "cr");
				Randomize((unsigned int)RFWRandom::getInteger(0,2000000000));
			}
		}

		//Seed this instance from the global (shared) RFWRandom stream.
		void Randomize() {
			Randomize((unsigned int)RFWRandom::getInteger(0,2000000000));
		}

		//randomize procedures (seed 0 is remapped to 1)
		void Randomize (unsigned long s) {
			if (s==0) s = 1;
			init_genrand(s);
		}

		unsigned long GetRand() {return genrand_int32();}

		//pick an integer uniformly at random between inf and sup (both inclusive)
		//Rejection sampling, mirroring RFWRandom::getInteger.
		int GetInteger (int inf, int sup) {
			if (sup<=inf) return inf;
			unsigned long range, minallowed, u;
			range = (unsigned long)(sup-inf+1);             //number of values allowed
			minallowed = (RFWRandom::maxvalue % range) + 1; //restrict search space to avoid small numbers
			if (minallowed==range) minallowed = 0;
			do {u = GetRand();}                             //repeat until a good number is found
			while (u < minallowed);
			return (inf + (int)(u % range));                //return a number in the range
		}

		float GetFloat () {return (float)GetDouble();}                 //get a float number in [0;1]
		double GetDouble() { return GetDoubleClosed(); }               //double in the range [0;1]
		//double GetDouble() { double r = GetDoubleClosed(); fprintf(stderr, "<<< %.10f : %.10f : %d >>>", r, (double)RFWRandom::maxvalue, sizeof(RFWRandom::maxvalue)); return r; } //double in the range [0;1]
		double GetDoubleClosed() { return ((double)GetRand() / (double)RFWRandom::maxvalue); }     //double in the range [0;1]
		double GetDoubleOpen() {return ((double)GetRand()/((double)(RFWRandom::maxvalue)+1.0));}   //double in the range [0;1)
		bool GetBool () {return (GetRand() & 1);}
};

#endif
basic_openmp.c
#include <stdio.h> #include <stdlib.h> int main(){ #pragma omp parallel { printf( "Hello World\n" ); } return EXIT_SUCCESS; }
bt.c
/*--------------------------------------------------------------------
  NAS Parallel Benchmarks 3.0 structured OpenMP C versions - BT

  This benchmark is an OpenMP C version of the NPB BT code.

  The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran
  versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is
  performed by the UVSQ.

  Permission to use, copy, distribute and modify this software for any
  purpose with or without fee is hereby granted.
  This software is provided "as is" without express or implied warranty.

  Information on OpenMP activities at RWCP is available at:
           http://pdplab.trc.rwcp.or.jp/pdperf/Omni/

  Information on NAS Parallel Benchmarks 2.3 is available at:
           http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
  Authors: R. Van der Wijngaart
           T. Harris
           M. Yarrow
  OpenMP C version: S. Satoh
  3.0 structure translation: M. Popov
--------------------------------------------------------------------*/

#include "npb-C.h"

/* global variables */
#include "header.h"

/* PMC: signal-based start/stop hooks for external performance monitoring */
#include <signal.h>
#include <unistd.h>

/* function declarations */
static void add(void);
static void adi(void);
static void error_norm(double rms[5]);
static void rhs_norm(double rms[5]);
static void exact_rhs(void);
static void exact_solution(double xi, double eta, double zeta,
                           double dtemp[5]);
static void initialize(void);
static void lhsinit(void);
static void lhsx(void);
static void lhsy(void);
static void lhsz(void);
static void compute_rhs(void);
static void set_constants(void);
static void verify(int no_time_steps, char *class, boolean *verified);
static void x_solve(void);
static void x_backsubstitute(void);
static void x_solve_cell(void);
static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]);
static void matmul_sub(double ablock[5][5], double bblock[5][5],
                       double cblock[5][5]);
static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]);
static void binvrhs(double lhs[5][5], double r[5]);
static void y_solve(void);
static void y_backsubstitute(void);
static void y_solve_cell(void);
static void z_solve(void);
static void z_backsubstitute(void);
static void z_solve_cell(void);

/*--------------------------------------------------------------------
      program BT

  Driver: reads optional problem parameters from inputbt.data, runs the
  Block-Tridiagonal ADI solver for `niter` time steps, verifies the
  result against reference norms, and prints the MFLOPS report.
  With argc>=2 the process SIGSTOPs itself before and after the timed
  region so an external tool can attach performance counters (PMC).
c-------------------------------------------------------------------*/
int main(int argc, char **argv) {

  int niter, step, n3;
  int nthreads = 1;
  double navg, mflops;

  double tmax;
  boolean verified;
  char class;
  FILE *fp;

/*--------------------------------------------------------------------
c      Root node reads input file (if it exists) else takes
c      defaults from parameters
c-------------------------------------------------------------------*/

  printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
	 " - BT Benchmark\n\n");

  fp = fopen("inputbt.data", "r");
  if (fp != NULL) {
    printf(" Reading from input file inputbt.data");
    /* NOTE(review): fscanf/fgetc results are unchecked — a malformed
       input file leaves niter/dt/grid_points undefined. */
    fscanf(fp, "%d", &niter);
    while (fgetc(fp) != '\n');
    fscanf(fp, "%lg", &dt);
    while (fgetc(fp) != '\n');
    fscanf(fp, "%d%d%d",
	   &grid_points[0], &grid_points[1], &grid_points[2]);
    fclose(fp);
  } else {
    printf(" No input file inputbt.data. Using compiled defaults\n");

    niter = NITER_DEFAULT;
    dt    = DT_DEFAULT;
    grid_points[0] = PROBLEM_SIZE;
    grid_points[1] = PROBLEM_SIZE;
    grid_points[2] = PROBLEM_SIZE;
  }

  printf(" Size: %3dx%3dx%3d\n",
	 grid_points[0], grid_points[1], grid_points[2]);
  printf(" Iterations: %3d   dt: %10.6f\n", niter, dt);

  if (grid_points[0] > IMAX ||
      grid_points[1] > JMAX ||
      grid_points[2] > KMAX) {
    printf(" %dx%dx%d\n", grid_points[0], grid_points[1], grid_points[2]);
    printf(" Problem size too big for compiled array sizes\n");
    exit(1);
  }

  set_constants();

  initialize();

  lhsinit();

  exact_rhs();

/*--------------------------------------------------------------------
c      do one time step to touch all code, and reinitialize
c-------------------------------------------------------------------*/
  adi();
  initialize();

  timer_clear(1);

  /* PMC: stop ourselves so an external monitor can attach, then SIGCONT */
  long cpid = getpid();
  if (argc >= 2) {
    printf("PID:%ld \n", cpid);
    kill(cpid, SIGSTOP);
  }

  timer_start(1);

  for (step = 1; step <= niter; step++) {

    if (step % 20 == 0 || step == 1) {
      printf(" Time step %4d\n", step);
    }

    adi();
  }

#pragma omp parallel
  {
#if defined(_OPENMP)
#pragma omp master
    nthreads = omp_get_num_threads();
#endif /* _OPENMP */
  } /* end parallel */

  timer_stop(1);

  /* PMC: second stop marks the end of the measured region */
  if (argc >= 2) {
    printf("BENCH COMPLETE!\n");
    kill(cpid, SIGSTOP);
  }

  tmax = timer_read(1);

  verify(niter, &class, &verified);

  n3 = grid_points[0] * grid_points[1] * grid_points[2];
  navg = (grid_points[0] + grid_points[1] + grid_points[2]) / 3.0;
  if (tmax != 0.0) {
    /* NPB BT operation-count model (flops per step as a function of
       grid size) divided by elapsed seconds */
    mflops = 1.0e-6 * (double)niter *
      (3478.8 * (double)n3 - 17655.7 * pow2(navg) + 28023.7 * navg)
      / tmax;
  } else {
    mflops = 0.0;
  }
  c_print_results("BT", class, grid_points[0],
		  grid_points[1], grid_points[2], niter, nthreads,
		  tmax, mflops, "          floating point",
		  verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5,
		  CS6, "(none)");
  /* NOTE(review): no explicit return — fine under C99 (main returns 0),
     UB if the value is used under C89. */
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static void add(void) {

/*--------------------------------------------------------------------
c     addition of update to the vector u
c-------------------------------------------------------------------*/

  int i, j, k, m;

  /* Orphaned worksharing directive: add() is invoked from inside a
     parallel region (see adi()). */
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
	for (m = 0; m < 5; m++) {
	  u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m];
	}
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/* One ADI time step: build the RHS, solve the three factored
   block-tridiagonal systems (x, y, z sweeps), then apply the update.
   Each phase opens its own parallel region; the called routines
   contain the orphaned `omp for` worksharing loops. */
static void adi(void) {
#pragma omp parallel
  compute_rhs();

#pragma omp parallel
  x_solve();

#pragma omp parallel
  y_solve();

#pragma omp parallel
  z_solve();

#pragma omp parallel
  add();
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void error_norm(double rms[5]) {

/*--------------------------------------------------------------------
c     this function computes the norm of the difference between the
c     computed solution and the exact solution
c     (serial — called only from verification code)
c-------------------------------------------------------------------*/

  int i, j, k, m, d;
  double xi, eta, zeta, u_exact[5], add;

  for (m = 0; m < 5; m++) {
    rms[m] = 0.0;
  }

  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k < grid_points[2]; k++) {
	zeta = (double)k * dnzm1;
	exact_solution(xi, eta, zeta, u_exact);
	for (m = 0; m < 5; m++) {
	  add = u[i][j][k][m] - u_exact[m];
	  rms[m] = rms[m] + add * add;
	}
      }
    }
  }

  /* normalize by interior size in each dimension, then RMS */
  for (m = 0; m < 5; m++) {
    for (d = 0; d <= 2; d++) {
      rms[m] = rms[m] / (double)(grid_points[d]-2);
    }
    rms[m] = sqrt(rms[m]);
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void rhs_norm(double rms[5]) {

/*--------------------------------------------------------------------
c     RMS norm of the residual (rhs) over interior points — used by
c     verification.
--------------------------------------------------------------------*/

  int i, j, k, d, m;
  double add;

  for (m = 0; m < 5; m++) {
    rms[m] = 0.0;
  }

  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
	for (m = 0; m < 5; m++) {
	  add = rhs[i][j][k][m];
	  rms[m] = rms[m] + add * add;
	}
      }
    }
  }

  for (m = 0; m < 5; m++) {
    for (d = 0; d <= 2; d++) {
      rms[m] = rms[m] / (double)(grid_points[d]-2);
    }
    rms[m] = sqrt(rms[m]);
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void exact_rhs(void) {
#pragma omp parallel
{

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     compute the right hand side based on exact solution
c     (forcing term: central flux differences plus fourth-order
c     artificial dissipation in each coordinate direction)
c-------------------------------------------------------------------*/

  double dtemp[5], xi, eta, zeta, dtpp;
  int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;

/*--------------------------------------------------------------------
c     initialize
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
	for (m = 0; m < 5; m++) {
	  forcing[i][j][k][m] = 0.0;
	}
      }
    }
  }

/*--------------------------------------------------------------------
c     xi-direction flux differences
c     NOTE: ue/buf/cuf/q are 1-D scratch lines reused per (j,k) pencil
c     — presumably threadprivate or per-thread in header.h; confirm.
c-------------------------------------------------------------------*/
#pragma omp for
  for (j = 1; j < grid_points[1]-1; j++) {
    eta = (double)j * dnym1;
    for (k = 1; k < grid_points[2]-1; k++) {
      zeta = (double)k * dnzm1;

      for (i = 0; i < grid_points[0]; i++) {
	xi = (double)i * dnxm1;

	exact_solution(xi, eta, zeta, dtemp);
	for (m = 0; m < 5; m++) {
	  ue[i][m] = dtemp[m];
	}
	dtpp = 1.0 / dtemp[0];
	for (m = 1; m <= 4; m++) {
	  buf[i][m] = dtpp * dtemp[m];
	}
	cuf[i] = buf[i][1] * buf[i][1];
	buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + buf[i][3] * buf[i][3];
	q[i] = 0.5 * (buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] +
		      buf[i][3]*ue[i][3]);
      }

      for (i = 1; i < grid_points[0]-1; i++) {
	im1 = i-1;
	ip1 = i+1;

	forcing[i][j][k][0] = forcing[i][j][k][0] -
	  tx2*(ue[ip1][1]-ue[im1][1])+
	  dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);

	forcing[i][j][k][1] = forcing[i][j][k][1] -
	  tx2 * ((ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-
		 (ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+
	  xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+
	  dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ ue[im1][1]);

	forcing[i][j][k][2] = forcing[i][j][k][2] -
	  tx2 * (ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+
	  xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+
	  dx3tx1*( ue[ip1][2]-2.0* ue[i][2]+ ue[im1][2]);

	forcing[i][j][k][3] = forcing[i][j][k][3] -
	  tx2*(ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+
	  xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+
	  dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);

	forcing[i][j][k][4] = forcing[i][j][k][4] -
	  tx2*(buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-
	       buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+
	  0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+
	  xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+
	  xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+
	  dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]);
      }

/*--------------------------------------------------------------------
c     Fourth-order dissipation (one-sided stencils at the boundaries)
c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
	i = 1;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);
	i = 2;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (-4.0*ue[i-1][m] + 6.0*ue[i][m] -
	   4.0*ue[i+1][m] +     ue[i+2][m]);
      }

      for (m = 0; m < 5; m++) {
	for (i = 1*3; i <= grid_points[0]-3*1-1; i++) {
	  forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
	    (ue[i-2][m] - 4.0*ue[i-1][m] +
	     6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);
	}
      }

      for (m = 0; m < 5; m++) {
	i = grid_points[0]-3;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (ue[i-2][m] - 4.0*ue[i-1][m] +
	   6.0*ue[i][m] - 4.0*ue[i+1][m]);
	i = grid_points[0]-2;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);
      }
    }
  }

/*--------------------------------------------------------------------
c     eta-direction flux differences
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    xi = (double)i * dnxm1;
    for (k = 1; k < grid_points[2]-1; k++) {
      zeta = (double)k * dnzm1;

      for (j = 0; j < grid_points[1]; j++) {
	eta = (double)j * dnym1;

	exact_solution(xi, eta, zeta, dtemp);
	for (m = 0; m < 5; m++) {
	  ue[j][m] = dtemp[m];
	}
	dtpp = 1.0/dtemp[0];
	for (m = 1; m <= 4; m++) {
	  buf[j][m] = dtpp * dtemp[m];
	}
	cuf[j] = buf[j][2] * buf[j][2];
	buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + buf[j][3] * buf[j][3];
	q[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] +
		    buf[j][3]*ue[j][3]);
      }

      for (j = 1; j < grid_points[1]-1; j++) {
	jm1 = j-1;
	jp1 = j+1;

	forcing[i][j][k][0] = forcing[i][j][k][0] -
	  ty2*( ue[jp1][2]-ue[jm1][2] )+
	  dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);

	forcing[i][j][k][1] = forcing[i][j][k][1] -
	  ty2*(ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+
	  yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+
	  dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);

	forcing[i][j][k][2] = forcing[i][j][k][2] -
	  ty2*((ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-
	       (ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+
	  yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+
	  dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);

	forcing[i][j][k][3] = forcing[i][j][k][3] -
	  ty2*(ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+
	  yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+
	  dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]);

	forcing[i][j][k][4] = forcing[i][j][k][4] -
	  ty2*(buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-
	       buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+
	  0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+ buf[jm1][0])+
	  yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+
	  yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+
	  dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);
      }

/*--------------------------------------------------------------------
c     Fourth-order dissipation
c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
	j = 1;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);
	j = 2;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (-4.0*ue[j-1][m] + 6.0*ue[j][m] -
	   4.0*ue[j+1][m] +     ue[j+2][m]);
      }

      for (m = 0; m < 5; m++) {
	for (j = 1*3; j <= grid_points[1]-3*1-1; j++) {
	  forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
	    (ue[j-2][m] - 4.0*ue[j-1][m] +
	     6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);
	}
      }

      for (m = 0; m < 5; m++) {
	j = grid_points[1]-3;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (ue[j-2][m] - 4.0*ue[j-1][m] +
	   6.0*ue[j][m] - 4.0*ue[j+1][m]);
	j = grid_points[1]-2;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);
      }
    }
  }

/*--------------------------------------------------------------------
c     zeta-direction flux differences
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    xi = (double)i * dnxm1;
    for (j = 1; j < grid_points[1]-1; j++) {
      eta = (double)j * dnym1;

      for (k = 0; k < grid_points[2]; k++) {
	zeta = (double)k * dnzm1;

	exact_solution(xi, eta, zeta, dtemp);
	for (m = 0; m < 5; m++) {
	  ue[k][m] = dtemp[m];
	}
	dtpp = 1.0/dtemp[0];
	for (m = 1; m <= 4; m++) {
	  buf[k][m] = dtpp * dtemp[m];
	}
	cuf[k] = buf[k][3] * buf[k][3];
	buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + buf[k][2] * buf[k][2];
	q[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] +
		    buf[k][3]*ue[k][3]);
      }

      for (k = 1; k < grid_points[2]-1; k++) {
	km1 = k-1;
	kp1 = k+1;

	forcing[i][j][k][0] = forcing[i][j][k][0] -
	  tz2*( ue[kp1][3]-ue[km1][3] )+
	  dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);

	forcing[i][j][k][1] = forcing[i][j][k][1] -
	  tz2 * (ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+
	  zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+
	  dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);

	forcing[i][j][k][2] = forcing[i][j][k][2] -
	  tz2 * (ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+
	  zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+
	  dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);

	forcing[i][j][k][3] = forcing[i][j][k][3] -
	  tz2 * ((ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-
		 (ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+
	  zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+
	  dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);

	forcing[i][j][k][4] = forcing[i][j][k][4] -
	  tz2 * (buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-
		 buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+
	  0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0] +buf[km1][0])+
	  zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+
	  zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+
	  dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);
      }

/*--------------------------------------------------------------------
c     Fourth-order dissipation
c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
	k = 1;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);
	k = 2;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (-4.0*ue[k-1][m] + 6.0*ue[k][m] -
	   4.0*ue[k+1][m] +     ue[k+2][m]);
      }

      for (m = 0; m < 5; m++) {
	for (k = 1*3; k <= grid_points[2]-3*1-1; k++) {
	  forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
	    (ue[k-2][m] - 4.0*ue[k-1][m] +
	     6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);
	}
      }

      for (m = 0; m < 5; m++) {
	k = grid_points[2]-3;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (ue[k-2][m] - 4.0*ue[k-1][m] +
	   6.0*ue[k][m] - 4.0*ue[k+1][m]);
	k = grid_points[2]-2;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]);
      }
    }
  }

/*--------------------------------------------------------------------
c     now change the sign of the forcing function,
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
	for (m = 0; m < 5; m++) {
	  forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m];
	}
      }
    }
  }
}
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void exact_solution(double xi, double eta, double zeta,
			   double dtemp[5]) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     this function returns the exact solution at point xi, eta, zeta
c     (tri-variate polynomial with coefficient table ce[5][13])
c-------------------------------------------------------------------*/

  int m;

  for (m = 0; m < 5; m++) {
    dtemp[m] = ce[m][0] +
      xi*(ce[m][1] + xi*(ce[m][4] + xi*(ce[m][7] + xi*ce[m][10]))) +
      eta*(ce[m][2] + eta*(ce[m][5] + eta*(ce[m][8] + eta*ce[m][11])))+
      zeta*(ce[m][3] + zeta*(ce[m][6] + zeta*(ce[m][9] +
					      zeta*ce[m][12])));
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void initialize(void) {
#pragma omp parallel
{

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     This subroutine initializes the field variable u using
c     tri-linear transfinite interpolation of the boundary values
c-------------------------------------------------------------------*/

  int i, j, k, m, ix, iy, iz;
  double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];

/*--------------------------------------------------------------------
c  Later (in compute_rhs) we compute 1/u for every element. A few of
c  the corner elements are not used, but it convenient (and faster)
c  to compute the whole thing with a simple loop. Make sure those
c  values are nonzero by initializing the whole thing here.
c  NOTE(review): all three loops run to IMAX (not JMAX/KMAX) — matches
c  the upstream NPB source; presumably IMAX==JMAX==KMAX here; confirm
c  against header.h.
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 0; i < IMAX; i++) {
    for (j = 0; j < IMAX; j++) {
      for (k = 0; k < IMAX; k++) {
	for (m = 0; m < 5; m++) {
	  u[i][j][k][m] = 1.0;
	}
      }
    }
  }

/*--------------------------------------------------------------------
c     first store the "interpolated" values everywhere on the grid
c-------------------------------------------------------------------*/

#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k < grid_points[2]; k++) {
	zeta = (double)k * dnzm1;

	for (ix = 0; ix < 2; ix++) {
	  exact_solution((double)ix, eta, zeta, &(Pface[ix][0][0]));
	}
	for (iy = 0; iy < 2; iy++) {
	  exact_solution(xi, (double)iy , zeta, &Pface[iy][1][0]);
	}
	for (iz = 0; iz < 2; iz++) {
	  exact_solution(xi, eta, (double)iz, &Pface[iz][2][0]);
	}

	for (m = 0; m < 5; m++) {
	  Pxi   = xi   * Pface[1][0][m] + (1.0-xi)   * Pface[0][0][m];
	  Peta  = eta  * Pface[1][1][m] + (1.0-eta)  * Pface[0][1][m];
	  Pzeta = zeta * Pface[1][2][m] + (1.0-zeta) * Pface[0][2][m];

	  u[i][j][k][m] = Pxi + Peta + Pzeta -
	    Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
	    Pxi*Peta*Pzeta;
	}
      }
    }
  }

/*--------------------------------------------------------------------
c     now store the exact values on the boundaries
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     west face
c-------------------------------------------------------------------*/
  i = 0;
  xi = 0.0;
#pragma omp for nowait
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c     east face
c-------------------------------------------------------------------*/
  i = grid_points[0]-1;
  xi = 1.0;
#pragma omp for
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c     south face
c-------------------------------------------------------------------*/
  j = 0;
  eta = 0.0;
#pragma omp for nowait
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c     north face
c-------------------------------------------------------------------*/
  j = grid_points[1]-1;
  eta = 1.0;
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c     bottom face
c-------------------------------------------------------------------*/
  k = 0;
  zeta = 0.0;
#pragma omp for nowait
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i *dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c     top face
c-------------------------------------------------------------------*/
  k = grid_points[2]-1;
  zeta = 1.0;
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[i][j][k][m] = temp[m];
      }
    }
  }
}
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void lhsinit(void) {

#pragma omp parallel
{
  int i, j, k, m, n;

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c zero the whole left hand side for starters
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
	for (m = 0; m < 5; m++) {
	  for (n = 0; n < 5; n++) {
	    lhs[i][j][k][0][m][n] = 0.0;
	    lhs[i][j][k][1][m][n] = 0.0;
	    lhs[i][j][k][2][m][n] = 0.0;
	  }
	}
      }
    }
  }

/*--------------------------------------------------------------------
c next, set all diagonal values to 1. This is overkill, but convenient
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
	for (m = 0; m < 5; m++) {
	  lhs[i][j][k][1][m][m] = 1.0;
	}
      }
    }
  }
}
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void lhsx(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c This function computes the left hand side in the xi-direction
c-------------------------------------------------------------------*/

  int i, j, k;

/*--------------------------------------------------------------------
c determine a (labeled f) and n jacobians
c NOTE(review): orphaned omp-for - presumably called from inside an
c enclosing parallel region; confirm at the call site
c-------------------------------------------------------------------*/
#pragma omp for
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      for (i = 0; i < grid_points[0]; i++) {

	tmp1 = 1.0 / u[i][j][k][0];
	tmp2 = tmp1 * tmp1;
	tmp3 = tmp1 * tmp2;
/*--------------------------------------------------------------------
c flux (f) jacobian, 5x5 per grid point
c-------------------------------------------------------------------*/
	fjac[ i][ j][ k][0][0] = 0.0;
	fjac[ i][ j][ k][0][1] = 1.0;
	fjac[ i][ j][ k][0][2] = 0.0;
	fjac[ i][ j][ k][0][3] = 0.0;
	fjac[ i][ j][ k][0][4] = 0.0;

	fjac[ i][ j][ k][1][0] = -(u[i][j][k][1] * tmp2 *
				   u[i][j][k][1])
	  + c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1]
			 + u[i][j][k][2] * u[i][j][k][2]
			 + u[i][j][k][3] * u[i][j][k][3] ) * tmp2;
	fjac[i][j][k][1][1] = ( 2.0 - c2 ) *
	  ( u[i][j][k][1] / u[i][j][k][0] );
	fjac[i][j][k][1][2] = - c2 * ( u[i][j][k][2] * tmp1 );
	fjac[i][j][k][1][3] = - c2 * ( u[i][j][k][3] * tmp1 );
	fjac[i][j][k][1][4] = c2;

	fjac[i][j][k][2][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2;
	fjac[i][j][k][2][1] = u[i][j][k][2] * tmp1;
	fjac[i][j][k][2][2] = u[i][j][k][1] * tmp1;
	fjac[i][j][k][2][3] = 0.0;
	fjac[i][j][k][2][4] = 0.0;

	fjac[i][j][k][3][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
	fjac[i][j][k][3][1] = u[i][j][k][3] * tmp1;
	fjac[i][j][k][3][2] = 0.0;
	fjac[i][j][k][3][3] = u[i][j][k][1] * tmp1;
	fjac[i][j][k][3][4] = 0.0;

	fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
				       + u[i][j][k][2] * u[i][j][k][2]
				       + u[i][j][k][3] * u[i][j][k][3] ) * tmp2
	    - c1 * ( u[i][j][k][4] * tmp1 ) )
	  * ( u[i][j][k][1] * tmp1 );
	fjac[i][j][k][4][1] = c1 * u[i][j][k][4] * tmp1
	  - 0.50 * c2
	  * ( 3.0*u[i][j][k][1]*u[i][j][k][1]
	      + u[i][j][k][2]*u[i][j][k][2]
	      + u[i][j][k][3]*u[i][j][k][3] ) * tmp2;
	fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][1] ) * tmp2;
	fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][3]*u[i][j][k][1] ) * tmp2;
	fjac[i][j][k][4][4] = c1 * ( u[i][j][k][1] * tmp1 );

	/* viscous (n) jacobian; con43 weighting on the xi-aligned
	   momentum component (index 1) */
	njac[i][j][k][0][0] = 0.0;
	njac[i][j][k][0][1] = 0.0;
	njac[i][j][k][0][2] = 0.0;
	njac[i][j][k][0][3] = 0.0;
	njac[i][j][k][0][4] = 0.0;

	njac[i][j][k][1][0] = - con43 * c3c4 * tmp2 * u[i][j][k][1];
	njac[i][j][k][1][1] =   con43 * c3c4 * tmp1;
	njac[i][j][k][1][2] = 0.0;
	njac[i][j][k][1][3] = 0.0;
	njac[i][j][k][1][4] = 0.0;

	njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
	njac[i][j][k][2][1] = 0.0;
	njac[i][j][k][2][2] =   c3c4 * tmp1;
	njac[i][j][k][2][3] = 0.0;
	njac[i][j][k][2][4] = 0.0;

	njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
	njac[i][j][k][3][1] = 0.0;
	njac[i][j][k][3][2] = 0.0;
	njac[i][j][k][3][3] =   c3c4 * tmp1;
	njac[i][j][k][3][4] = 0.0;

	njac[i][j][k][4][0] = - ( con43 * c3c4
				  - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
	  - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
	  - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
	  - c1345 * tmp2 * u[i][j][k][4];
	njac[i][j][k][4][1] = ( con43 * c3c4
				- c1345 ) * tmp2 * u[i][j][k][1];
	njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
	njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
	njac[i][j][k][4][4] = ( c1345 ) * tmp1;
      }

/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in x direction:
c AA = coupling to i-1, BB = diagonal, CC = coupling to i+1
c-------------------------------------------------------------------*/
      for (i = 1; i < grid_points[0]-1; i++) {

	tmp1 = dt * tx1;
	tmp2 = dt * tx2;

	lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i-1][j][k][0][0] - tmp1 * njac[i-1][j][k][0][0] - tmp1 * dx1;
	lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i-1][j][k][0][1] - tmp1 * njac[i-1][j][k][0][1];
	lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i-1][j][k][0][2] - tmp1 * njac[i-1][j][k][0][2];
	lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i-1][j][k][0][3] - tmp1 * njac[i-1][j][k][0][3];
	lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i-1][j][k][0][4] - tmp1 * njac[i-1][j][k][0][4];

	lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i-1][j][k][1][0] - tmp1 * njac[i-1][j][k][1][0];
	lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i-1][j][k][1][1] - tmp1 * njac[i-1][j][k][1][1] - tmp1 * dx2;
	lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i-1][j][k][1][2] - tmp1 * njac[i-1][j][k][1][2];
	lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i-1][j][k][1][3] - tmp1 * njac[i-1][j][k][1][3];
	lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i-1][j][k][1][4] - tmp1 * njac[i-1][j][k][1][4];

	lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i-1][j][k][2][0] - tmp1 * njac[i-1][j][k][2][0];
	lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i-1][j][k][2][1] - tmp1 * njac[i-1][j][k][2][1];
	lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i-1][j][k][2][2] - tmp1 * njac[i-1][j][k][2][2] - tmp1 * dx3;
	lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i-1][j][k][2][3] - tmp1 * njac[i-1][j][k][2][3];
	lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i-1][j][k][2][4] - tmp1 * njac[i-1][j][k][2][4];

	lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i-1][j][k][3][0] - tmp1 * njac[i-1][j][k][3][0];
	lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i-1][j][k][3][1] - tmp1 * njac[i-1][j][k][3][1];
	lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i-1][j][k][3][2] - tmp1 * njac[i-1][j][k][3][2];
	lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i-1][j][k][3][3] - tmp1 * njac[i-1][j][k][3][3] - tmp1 * dx4;
	lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i-1][j][k][3][4] - tmp1 * njac[i-1][j][k][3][4];

	lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i-1][j][k][4][0] - tmp1 * njac[i-1][j][k][4][0];
	lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i-1][j][k][4][1] - tmp1 * njac[i-1][j][k][4][1];
	lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i-1][j][k][4][2] - tmp1 * njac[i-1][j][k][4][2];
	lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i-1][j][k][4][3] - tmp1 * njac[i-1][j][k][4][3];
	lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i-1][j][k][4][4] - tmp1 * njac[i-1][j][k][4][4] - tmp1 * dx5;

	lhs[i][j][k][BB][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dx1;
	lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
	lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
	lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
	lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];

	lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
	lhs[i][j][k][BB][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dx2;
	lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
	lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
	lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];

	lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
	lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
	lhs[i][j][k][BB][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dx3;
	lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
	lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];

	lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
	lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
	lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
	lhs[i][j][k][BB][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dx4;
	lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];

	lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
	lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
	lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
	lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
	lhs[i][j][k][BB][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dx5;

	lhs[i][j][k][CC][0][0] = tmp2 * fjac[i+1][j][k][0][0] - tmp1 * njac[i+1][j][k][0][0] - tmp1 * dx1;
	lhs[i][j][k][CC][0][1] = tmp2 * fjac[i+1][j][k][0][1] - tmp1 * njac[i+1][j][k][0][1];
	lhs[i][j][k][CC][0][2] = tmp2 * fjac[i+1][j][k][0][2] - tmp1 * njac[i+1][j][k][0][2];
	lhs[i][j][k][CC][0][3] = tmp2 * fjac[i+1][j][k][0][3] - tmp1 * njac[i+1][j][k][0][3];
	lhs[i][j][k][CC][0][4] = tmp2 * fjac[i+1][j][k][0][4] - tmp1 * njac[i+1][j][k][0][4];

	lhs[i][j][k][CC][1][0] = tmp2 * fjac[i+1][j][k][1][0] - tmp1 * njac[i+1][j][k][1][0];
	lhs[i][j][k][CC][1][1] = tmp2 * fjac[i+1][j][k][1][1] - tmp1 * njac[i+1][j][k][1][1] - tmp1 * dx2;
	lhs[i][j][k][CC][1][2] = tmp2 * fjac[i+1][j][k][1][2] - tmp1 * njac[i+1][j][k][1][2];
	lhs[i][j][k][CC][1][3] = tmp2 * fjac[i+1][j][k][1][3] - tmp1 * njac[i+1][j][k][1][3];
	lhs[i][j][k][CC][1][4] = tmp2 * fjac[i+1][j][k][1][4] - tmp1 * njac[i+1][j][k][1][4];

	lhs[i][j][k][CC][2][0] = tmp2 * fjac[i+1][j][k][2][0] - tmp1 * njac[i+1][j][k][2][0];
	lhs[i][j][k][CC][2][1] = tmp2 * fjac[i+1][j][k][2][1] - tmp1 * njac[i+1][j][k][2][1];
	lhs[i][j][k][CC][2][2] = tmp2 * fjac[i+1][j][k][2][2] - tmp1 * njac[i+1][j][k][2][2] - tmp1 * dx3;
	lhs[i][j][k][CC][2][3] = tmp2 * fjac[i+1][j][k][2][3] - tmp1 * njac[i+1][j][k][2][3];
	lhs[i][j][k][CC][2][4] = tmp2 * fjac[i+1][j][k][2][4] - tmp1 * njac[i+1][j][k][2][4];

	lhs[i][j][k][CC][3][0] = tmp2 * fjac[i+1][j][k][3][0] - tmp1 * njac[i+1][j][k][3][0];
	lhs[i][j][k][CC][3][1] = tmp2 * fjac[i+1][j][k][3][1] - tmp1 * njac[i+1][j][k][3][1];
	lhs[i][j][k][CC][3][2] = tmp2 * fjac[i+1][j][k][3][2] - tmp1 * njac[i+1][j][k][3][2];
	lhs[i][j][k][CC][3][3] = tmp2 * fjac[i+1][j][k][3][3] - tmp1 * njac[i+1][j][k][3][3] - tmp1 * dx4;
	lhs[i][j][k][CC][3][4] = tmp2 * fjac[i+1][j][k][3][4] - tmp1 * njac[i+1][j][k][3][4];

	lhs[i][j][k][CC][4][0] = tmp2 * fjac[i+1][j][k][4][0] - tmp1 * njac[i+1][j][k][4][0];
	lhs[i][j][k][CC][4][1] = tmp2 * fjac[i+1][j][k][4][1] - tmp1 * njac[i+1][j][k][4][1];
	lhs[i][j][k][CC][4][2] = tmp2 * fjac[i+1][j][k][4][2] - tmp1 * njac[i+1][j][k][4][2];
	lhs[i][j][k][CC][4][3] = tmp2 * fjac[i+1][j][k][4][3] - tmp1 * njac[i+1][j][k][4][3];
	lhs[i][j][k][CC][4][4] = tmp2 * fjac[i+1][j][k][4][4] - tmp1 * njac[i+1][j][k][4][4] - tmp1 * dx5;
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void lhsy(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c This function computes the left hand side for the three y-factors
c-------------------------------------------------------------------*/

  int i, j, k;

/*--------------------------------------------------------------------
c Compute the indices for storing the tri-diagonal matrix;
c determine a (labeled f) and n jacobians for cell c
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {

	tmp1 = 1.0 / u[i][j][k][0];
	tmp2 = tmp1 * tmp1;
	tmp3 = tmp1 * tmp2;

	fjac[ i][ j][ k][0][0] = 0.0;
	fjac[ i][ j][ k][0][1] = 0.0;
	fjac[ i][ j][ k][0][2] = 1.0;
	fjac[ i][ j][ k][0][3] = 0.0;
	fjac[ i][ j][ k][0][4] = 0.0;

	fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2;
	fjac[i][j][k][1][1] = u[i][j][k][2] * tmp1;
	fjac[i][j][k][1][2] = u[i][j][k][1] * tmp1;
	fjac[i][j][k][1][3] = 0.0;
	fjac[i][j][k][1][4] = 0.0;

	fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][2]*tmp2)
	  + 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
			    + u[i][j][k][2] * u[i][j][k][2]
			    + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );
	fjac[i][j][k][2][1] = - c2 * u[i][j][k][1] * tmp1;
	fjac[i][j][k][2][2] = ( 2.0 - c2 ) * u[i][j][k][2] * tmp1;
	fjac[i][j][k][2][3] = - c2 * u[i][j][k][3] * tmp1;
	fjac[i][j][k][2][4] = c2;

	fjac[i][j][k][3][0] = - ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2;
	fjac[i][j][k][3][1] = 0.0;
	fjac[i][j][k][3][2] = u[i][j][k][3] * tmp1;
	fjac[i][j][k][3][3] = u[i][j][k][2] * tmp1;
	fjac[i][j][k][3][4] = 0.0;

	fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
				       + u[i][j][k][2] * u[i][j][k][2]
				       + u[i][j][k][3] * u[i][j][k][3] ) * tmp2
	    - c1 * u[i][j][k][4] * tmp1 )
	  * u[i][j][k][2] * tmp1;
	fjac[i][j][k][4][1] = - c2 * u[i][j][k][1]*u[i][j][k][2] * tmp2;
	fjac[i][j][k][4][2] = c1 * u[i][j][k][4] * tmp1
	  - 0.50 * c2
	  * ( ( u[i][j][k][1]*u[i][j][k][1]
		+ 3.0 * u[i][j][k][2]*u[i][j][k][2]
		+ u[i][j][k][3]*u[i][j][k][3] ) * tmp2 );
	fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2;
	fjac[i][j][k][4][4] = c1 * u[i][j][k][2] * tmp1;

	/* viscous (n) jacobian; con43 weighting on the eta-aligned
	   momentum component (index 2) */
	njac[i][j][k][0][0] = 0.0;
	njac[i][j][k][0][1] = 0.0;
	njac[i][j][k][0][2] = 0.0;
	njac[i][j][k][0][3] = 0.0;
	njac[i][j][k][0][4] = 0.0;

	njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
	njac[i][j][k][1][1] =   c3c4 * tmp1;
	njac[i][j][k][1][2] = 0.0;
	njac[i][j][k][1][3] = 0.0;
	njac[i][j][k][1][4] = 0.0;

	njac[i][j][k][2][0] = - con43 * c3c4 * tmp2 * u[i][j][k][2];
	njac[i][j][k][2][1] = 0.0;
	njac[i][j][k][2][2] =   con43 * c3c4 * tmp1;
	njac[i][j][k][2][3] = 0.0;
	njac[i][j][k][2][4] = 0.0;

	njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
	njac[i][j][k][3][1] = 0.0;
	njac[i][j][k][3][2] = 0.0;
	njac[i][j][k][3][3] =   c3c4 * tmp1;
	njac[i][j][k][3][4] = 0.0;

	njac[i][j][k][4][0] = - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
	  - ( con43 * c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
	  - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
	  - c1345 * tmp2 * u[i][j][k][4];
	njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
	njac[i][j][k][4][2] = ( con43 * c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
	njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
	njac[i][j][k][4][4] = ( c1345 ) * tmp1;
      }
    }
  }

/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in y direction:
c AA = coupling to j-1, BB = diagonal, CC = coupling to j+1
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {

	tmp1 = dt * ty1;
	tmp2 = dt * ty2;

	lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j-1][k][0][0] - tmp1 * njac[i][j-1][k][0][0] - tmp1 * dy1;
	lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j-1][k][0][1] - tmp1 * njac[i][j-1][k][0][1];
	lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j-1][k][0][2] - tmp1 * njac[i][j-1][k][0][2];
	lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j-1][k][0][3] - tmp1 * njac[i][j-1][k][0][3];
	lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j-1][k][0][4] - tmp1 * njac[i][j-1][k][0][4];

	lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j-1][k][1][0] - tmp1 * njac[i][j-1][k][1][0];
	lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j-1][k][1][1] - tmp1 * njac[i][j-1][k][1][1] - tmp1 * dy2;
	lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j-1][k][1][2] - tmp1 * njac[i][j-1][k][1][2];
	lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j-1][k][1][3] - tmp1 * njac[i][j-1][k][1][3];
	lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j-1][k][1][4] - tmp1 * njac[i][j-1][k][1][4];

	lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j-1][k][2][0] - tmp1 * njac[i][j-1][k][2][0];
	lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j-1][k][2][1] - tmp1 * njac[i][j-1][k][2][1];
	lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j-1][k][2][2] - tmp1 * njac[i][j-1][k][2][2] - tmp1 * dy3;
	lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j-1][k][2][3] - tmp1 * njac[i][j-1][k][2][3];
	lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j-1][k][2][4] - tmp1 * njac[i][j-1][k][2][4];

	lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j-1][k][3][0] - tmp1 * njac[i][j-1][k][3][0];
	lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j-1][k][3][1] - tmp1 * njac[i][j-1][k][3][1];
	lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j-1][k][3][2] - tmp1 * njac[i][j-1][k][3][2];
	lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j-1][k][3][3] - tmp1 * njac[i][j-1][k][3][3] - tmp1 * dy4;
	lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j-1][k][3][4] - tmp1 * njac[i][j-1][k][3][4];

	lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j-1][k][4][0] - tmp1 * njac[i][j-1][k][4][0];
	lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j-1][k][4][1] - tmp1 * njac[i][j-1][k][4][1];
	lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j-1][k][4][2] - tmp1 * njac[i][j-1][k][4][2];
	lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j-1][k][4][3] - tmp1 * njac[i][j-1][k][4][3];
	lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j-1][k][4][4] - tmp1 * njac[i][j-1][k][4][4] - tmp1 * dy5;

	lhs[i][j][k][BB][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dy1;
	lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
	lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
	lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
	lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];

	lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
	lhs[i][j][k][BB][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dy2;
	lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
	lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
	lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];

	lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
	lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
	lhs[i][j][k][BB][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dy3;
	lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
	lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];

	lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
	lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
	lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
	lhs[i][j][k][BB][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dy4;
	lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];

	lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
	lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
	lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
	lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
	lhs[i][j][k][BB][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dy5;

	lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j+1][k][0][0] - tmp1 * njac[i][j+1][k][0][0] - tmp1 * dy1;
	lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j+1][k][0][1] - tmp1 * njac[i][j+1][k][0][1];
	lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j+1][k][0][2] - tmp1 * njac[i][j+1][k][0][2];
	lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j+1][k][0][3] - tmp1 * njac[i][j+1][k][0][3];
	lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j+1][k][0][4] - tmp1 * njac[i][j+1][k][0][4];

	lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j+1][k][1][0] - tmp1 * njac[i][j+1][k][1][0];
	lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j+1][k][1][1] - tmp1 * njac[i][j+1][k][1][1] - tmp1 * dy2;
	lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j+1][k][1][2] - tmp1 * njac[i][j+1][k][1][2];
	lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j+1][k][1][3] - tmp1 * njac[i][j+1][k][1][3];
	lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j+1][k][1][4] - tmp1 * njac[i][j+1][k][1][4];
	lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j+1][k][2][0] - tmp1 * njac[i][j+1][k][2][0];
	lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j+1][k][2][1] - tmp1 * njac[i][j+1][k][2][1];
	lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j+1][k][2][2] - tmp1 * njac[i][j+1][k][2][2] - tmp1 * dy3;
	lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j+1][k][2][3] - tmp1 * njac[i][j+1][k][2][3];
	lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j+1][k][2][4] - tmp1 * njac[i][j+1][k][2][4];

	lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j+1][k][3][0] - tmp1 * njac[i][j+1][k][3][0];
	lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j+1][k][3][1] - tmp1 * njac[i][j+1][k][3][1];
	lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j+1][k][3][2] - tmp1 * njac[i][j+1][k][3][2];
	lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j+1][k][3][3] - tmp1 * njac[i][j+1][k][3][3] - tmp1 * dy4;
	lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j+1][k][3][4] - tmp1 * njac[i][j+1][k][3][4];

	lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j+1][k][4][0] - tmp1 * njac[i][j+1][k][4][0];
	lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j+1][k][4][1] - tmp1 * njac[i][j+1][k][4][1];
	lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j+1][k][4][2] - tmp1 * njac[i][j+1][k][4][2];
	lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j+1][k][4][3] - tmp1 * njac[i][j+1][k][4][3];
	lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j+1][k][4][4] - tmp1 * njac[i][j+1][k][4][4] - tmp1 * dy5;
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void lhsz(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c This function computes the left hand side for the three z-factors
c-------------------------------------------------------------------*/

  int i, j, k;

/*--------------------------------------------------------------------
c Compute the indices for storing the block-diagonal matrix;
c determine c (labeled f) and s jacobians
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 0; k < grid_points[2]; k++) {

	tmp1 = 1.0 / u[i][j][k][0];
	tmp2 = tmp1 * tmp1;
	tmp3 = tmp1 * tmp2;

	fjac[i][j][k][0][0] = 0.0;
	fjac[i][j][k][0][1] = 0.0;
	fjac[i][j][k][0][2] = 0.0;
	fjac[i][j][k][0][3] = 1.0;
	fjac[i][j][k][0][4] = 0.0;

	fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
	fjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;
	fjac[i][j][k][1][2] = 0.0;
	fjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;
	fjac[i][j][k][1][4] = 0.0;

	fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2;
	fjac[i][j][k][2][1] = 0.0;
	fjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;
	fjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;
	fjac[i][j][k][2][4] = 0.0;

	fjac[i][j][k][3][0] = - (u[i][j][k][3]*u[i][j][k][3] * tmp2 )
	  + 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
			    + u[i][j][k][2] * u[i][j][k][2]
			    + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );
	fjac[i][j][k][3][1] = - c2 * u[i][j][k][1] * tmp1;
	fjac[i][j][k][3][2] = - c2 * u[i][j][k][2] * tmp1;
	fjac[i][j][k][3][3] = ( 2.0 - c2 ) * u[i][j][k][3] * tmp1;
	fjac[i][j][k][3][4] = c2;

	fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
				       + u[i][j][k][2] * u[i][j][k][2]
				       + u[i][j][k][3] * u[i][j][k][3] ) * tmp2
	    - c1 * ( u[i][j][k][4] * tmp1 ) )
	  * ( u[i][j][k][3] * tmp1 );
	fjac[i][j][k][4][1] = - c2 * ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
	fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2;
	fjac[i][j][k][4][3] = c1 * ( u[i][j][k][4] * tmp1 )
	  - 0.50 * c2
	  * ( ( u[i][j][k][1]*u[i][j][k][1]
		+ u[i][j][k][2]*u[i][j][k][2]
		+ 3.0*u[i][j][k][3]*u[i][j][k][3] ) * tmp2 );
	fjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;

	/* viscous (n) jacobian; con43 weighting on the zeta-aligned
	   momentum component (index 3) */
	njac[i][j][k][0][0] = 0.0;
	njac[i][j][k][0][1] = 0.0;
	njac[i][j][k][0][2] = 0.0;
	njac[i][j][k][0][3] = 0.0;
	njac[i][j][k][0][4] = 0.0;

	njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = c3c4 * tmp1; njac[i][j][k][1][2] = 0.0; njac[i][j][k][1][3] = 0.0; njac[i][j][k][1][4] = 0.0; njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2]; njac[i][j][k][2][1] = 0.0; njac[i][j][k][2][2] = c3c4 * tmp1; njac[i][j][k][2][3] = 0.0; njac[i][j][k][2][4] = 0.0; njac[i][j][k][3][0] = - con43 * c3c4 * tmp2 * u[i][j][k][3]; njac[i][j][k][3][1] = 0.0; njac[i][j][k][3][2] = 0.0; njac[i][j][k][3][3] = con43 * c3 * c4 * tmp1; njac[i][j][k][3][4] = 0.0; njac[i][j][k][4][0] = - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][1])) - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2])) - ( con43 * c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3])) - c1345 * tmp2 * u[i][j][k][4]; njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1]; njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2]; njac[i][j][k][4][3] = ( con43 * c3c4 - c1345 ) * tmp2 * u[i][j][k][3]; njac[i][j][k][4][4] = ( c1345 )* tmp1; } } } /*-------------------------------------------------------------------- c now jacobians set, so form left hand side in z direction c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { tmp1 = dt * tz1; tmp2 = dt * tz2; lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j][k-1][0][0] - tmp1 * njac[i][j][k-1][0][0] - tmp1 * dz1; lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j][k-1][0][1] - tmp1 * njac[i][j][k-1][0][1]; lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j][k-1][0][2] - tmp1 * njac[i][j][k-1][0][2]; lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j][k-1][0][3] - tmp1 * njac[i][j][k-1][0][3]; lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j][k-1][0][4] - tmp1 * njac[i][j][k-1][0][4]; lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j][k-1][1][0] - tmp1 * njac[i][j][k-1][1][0]; lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j][k-1][1][1] - tmp1 * njac[i][j][k-1][1][1] - tmp1 * dz2; lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j][k-1][1][2] 
	  - tmp1 * njac[i][j][k-1][1][2];
	lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j][k-1][1][3] - tmp1 * njac[i][j][k-1][1][3];
	lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j][k-1][1][4] - tmp1 * njac[i][j][k-1][1][4];

	lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j][k-1][2][0] - tmp1 * njac[i][j][k-1][2][0];
	lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j][k-1][2][1] - tmp1 * njac[i][j][k-1][2][1];
	lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j][k-1][2][2] - tmp1 * njac[i][j][k-1][2][2] - tmp1 * dz3;
	lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j][k-1][2][3] - tmp1 * njac[i][j][k-1][2][3];
	lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j][k-1][2][4] - tmp1 * njac[i][j][k-1][2][4];

	lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j][k-1][3][0] - tmp1 * njac[i][j][k-1][3][0];
	lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j][k-1][3][1] - tmp1 * njac[i][j][k-1][3][1];
	lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j][k-1][3][2] - tmp1 * njac[i][j][k-1][3][2];
	lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j][k-1][3][3] - tmp1 * njac[i][j][k-1][3][3] - tmp1 * dz4;
	lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j][k-1][3][4] - tmp1 * njac[i][j][k-1][3][4];

	lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j][k-1][4][0] - tmp1 * njac[i][j][k-1][4][0];
	lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j][k-1][4][1] - tmp1 * njac[i][j][k-1][4][1];
	lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j][k-1][4][2] - tmp1 * njac[i][j][k-1][4][2];
	lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j][k-1][4][3] - tmp1 * njac[i][j][k-1][4][3];
	lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j][k-1][4][4] - tmp1 * njac[i][j][k-1][4][4] - tmp1 * dz5;

	lhs[i][j][k][BB][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dz1;
	lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
	lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
	lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
	lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];

	lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
	lhs[i][j][k][BB][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dz2;
	lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
	lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
	lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];

	lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
	lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
	lhs[i][j][k][BB][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dz3;
	lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
	lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];

	lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
	lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
	lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
	lhs[i][j][k][BB][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dz4;
	lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];

	lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
	lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
	lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
	lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
	lhs[i][j][k][BB][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dz5;

	lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j][k+1][0][0] - tmp1 * njac[i][j][k+1][0][0] - tmp1 * dz1;
	lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j][k+1][0][1] - tmp1 * njac[i][j][k+1][0][1];
	lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j][k+1][0][2] - tmp1 * njac[i][j][k+1][0][2];
	lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j][k+1][0][3] - tmp1 * njac[i][j][k+1][0][3];
	lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j][k+1][0][4] - tmp1 * njac[i][j][k+1][0][4];

	lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j][k+1][1][0] - tmp1 * njac[i][j][k+1][1][0];
	lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j][k+1][1][1] - tmp1 * njac[i][j][k+1][1][1] - tmp1 * dz2;
	lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j][k+1][1][2] - tmp1 * njac[i][j][k+1][1][2];
	lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j][k+1][1][3] - tmp1 * njac[i][j][k+1][1][3];
	lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j][k+1][1][4] - tmp1 * njac[i][j][k+1][1][4];

	lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j][k+1][2][0] - tmp1 * njac[i][j][k+1][2][0];
	lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j][k+1][2][1] - tmp1 * njac[i][j][k+1][2][1];
	lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j][k+1][2][2] - tmp1 * njac[i][j][k+1][2][2] - tmp1 * dz3;
	lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j][k+1][2][3] - tmp1 * njac[i][j][k+1][2][3];
	lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j][k+1][2][4] - tmp1 * njac[i][j][k+1][2][4];

	lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j][k+1][3][0] - tmp1 * njac[i][j][k+1][3][0];
	lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j][k+1][3][1] - tmp1 * njac[i][j][k+1][3][1];
	lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j][k+1][3][2] - tmp1 * njac[i][j][k+1][3][2];
	lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j][k+1][3][3] - tmp1 * njac[i][j][k+1][3][3] - tmp1 * dz4;
	lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j][k+1][3][4] - tmp1 * njac[i][j][k+1][3][4];

	lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j][k+1][4][0] - tmp1 * njac[i][j][k+1][4][0];
	lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j][k+1][4][1] - tmp1 * njac[i][j][k+1][4][1];
	lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j][k+1][4][2] - tmp1 * njac[i][j][k+1][4][2];
	lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j][k+1][4][3] - tmp1 * njac[i][j][k+1][4][3];
	lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j][k+1][4][4] - tmp1 * njac[i][j][k+1][4][4] - tmp1 * dz5;
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void compute_rhs(void) {

  int i, j, k, m;
  double rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1;

/*--------------------------------------------------------------------
c compute the reciprocal of density, and the kinetic energy,
c and the speed of sound.
c-------------------------------------------------------------------*/ #pragma omp for nowait for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { rho_inv = 1.0/u[i][j][k][0]; rho_i[i][j][k] = rho_inv; us[i][j][k] = u[i][j][k][1] * rho_inv; vs[i][j][k] = u[i][j][k][2] * rho_inv; ws[i][j][k] = u[i][j][k][3] * rho_inv; square[i][j][k] = 0.5 * (u[i][j][k][1]*u[i][j][k][1] + u[i][j][k][2]*u[i][j][k][2] + u[i][j][k][3]*u[i][j][k][3] ) * rho_inv; qs[i][j][k] = square[i][j][k] * rho_inv; } } } /*-------------------------------------------------------------------- c copy the exact forcing term to the right hand side; because c this forcing term is known, we can store it on the whole grid c including the boundary c-------------------------------------------------------------------*/ #pragma omp for for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = forcing[i][j][k][m]; } } } } /*-------------------------------------------------------------------- c compute xi-direction fluxes c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { uijk = us[i][j][k]; up1 = us[i+1][j][k]; um1 = us[i-1][j][k]; rhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 * (u[i+1][j][k][0] - 2.0*u[i][j][k][0] + u[i-1][j][k][0]) - tx2 * (u[i+1][j][k][1] - u[i-1][j][k][1]); rhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 * (u[i+1][j][k][1] - 2.0*u[i][j][k][1] + u[i-1][j][k][1]) + xxcon2*con43 * (up1 - 2.0*uijk + um1) - tx2 * (u[i+1][j][k][1]*up1 - u[i-1][j][k][1]*um1 + (u[i+1][j][k][4]- square[i+1][j][k]- u[i-1][j][k][4]+ square[i-1][j][k])* c2); rhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 * (u[i+1][j][k][2] - 2.0*u[i][j][k][2] + u[i-1][j][k][2]) + xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] + 
vs[i-1][j][k]) - tx2 * (u[i+1][j][k][2]*up1 - u[i-1][j][k][2]*um1); rhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 * (u[i+1][j][k][3] - 2.0*u[i][j][k][3] + u[i-1][j][k][3]) + xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] + ws[i-1][j][k]) - tx2 * (u[i+1][j][k][3]*up1 - u[i-1][j][k][3]*um1); rhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 * (u[i+1][j][k][4] - 2.0*u[i][j][k][4] + u[i-1][j][k][4]) + xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] + qs[i-1][j][k]) + xxcon4 * (up1*up1 - 2.0*uijk*uijk + um1*um1) + xxcon5 * (u[i+1][j][k][4]*rho_i[i+1][j][k] - 2.0*u[i][j][k][4]*rho_i[i][j][k] + u[i-1][j][k][4]*rho_i[i-1][j][k]) - tx2 * ( (c1*u[i+1][j][k][4] - c2*square[i+1][j][k])*up1 - (c1*u[i-1][j][k][4] - c2*square[i-1][j][k])*um1 ); } } } /*-------------------------------------------------------------------- c add fourth order xi-direction dissipation c-------------------------------------------------------------------*/ i = 1; #pragma omp for nowait for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m]- dssp * ( 5.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + u[i+2][j][k][m]); } } } i = 2; #pragma omp for nowait for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + u[i+2][j][k][m]); } } } #pragma omp for nowait for (i = 3; i < grid_points[0]-3; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + u[i+2][j][k][m] ); } } } } i = grid_points[0]-3; #pragma omp for nowait for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + 
6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] ); } } } i = grid_points[0]-2; #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i-2][j][k][m] - 4.*u[i-1][j][k][m] + 5.0*u[i][j][k][m] ); } } } /*-------------------------------------------------------------------- c compute eta-direction fluxes c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { vijk = vs[i][j][k]; vp1 = vs[i][j+1][k]; vm1 = vs[i][j-1][k]; rhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 * (u[i][j+1][k][0] - 2.0*u[i][j][k][0] + u[i][j-1][k][0]) - ty2 * (u[i][j+1][k][2] - u[i][j-1][k][2]); rhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 * (u[i][j+1][k][1] - 2.0*u[i][j][k][1] + u[i][j-1][k][1]) + yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + us[i][j-1][k]) - ty2 * (u[i][j+1][k][1]*vp1 - u[i][j-1][k][1]*vm1); rhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 * (u[i][j+1][k][2] - 2.0*u[i][j][k][2] + u[i][j-1][k][2]) + yycon2*con43 * (vp1 - 2.0*vijk + vm1) - ty2 * (u[i][j+1][k][2]*vp1 - u[i][j-1][k][2]*vm1 + (u[i][j+1][k][4] - square[i][j+1][k] - u[i][j-1][k][4] + square[i][j-1][k]) *c2); rhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 * (u[i][j+1][k][3] - 2.0*u[i][j][k][3] + u[i][j-1][k][3]) + yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + ws[i][j-1][k]) - ty2 * (u[i][j+1][k][3]*vp1 - u[i][j-1][k][3]*vm1); rhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 * (u[i][j+1][k][4] - 2.0*u[i][j][k][4] + u[i][j-1][k][4]) + yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + qs[i][j-1][k]) + yycon4 * (vp1*vp1 - 2.0*vijk*vijk + vm1*vm1) + yycon5 * (u[i][j+1][k][4]*rho_i[i][j+1][k] - 2.0*u[i][j][k][4]*rho_i[i][j][k] + u[i][j-1][k][4]*rho_i[i][j-1][k]) - ty2 * ((c1*u[i][j+1][k][4] - c2*square[i][j+1][k]) * vp1 - (c1*u[i][j-1][k][4] - c2*square[i][j-1][k]) * vm1); } } } 
/*-------------------------------------------------------------------- c add fourth order eta-direction dissipation c-------------------------------------------------------------------*/ j = 1; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m]- dssp * ( 5.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + u[i][j+2][k][m]); } } } j = 2; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + u[i][j+2][k][m]); } } } #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (j = 3; j < grid_points[1]-3; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + u[i][j+2][k][m] ); } } } } j = grid_points[1]-3; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] ); } } } j = grid_points[1]-2; #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j-2][k][m] - 4.*u[i][j-1][k][m] + 5.*u[i][j][k][m] ); } } } /*-------------------------------------------------------------------- c compute zeta-direction fluxes c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { wijk = ws[i][j][k]; wp1 = ws[i][j][k+1]; wm1 = ws[i][j][k-1]; rhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 * 
(u[i][j][k+1][0] - 2.0*u[i][j][k][0] + u[i][j][k-1][0]) - tz2 * (u[i][j][k+1][3] - u[i][j][k-1][3]); rhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 * (u[i][j][k+1][1] - 2.0*u[i][j][k][1] + u[i][j][k-1][1]) + zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + us[i][j][k-1]) - tz2 * (u[i][j][k+1][1]*wp1 - u[i][j][k-1][1]*wm1); rhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 * (u[i][j][k+1][2] - 2.0*u[i][j][k][2] + u[i][j][k-1][2]) + zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + vs[i][j][k-1]) - tz2 * (u[i][j][k+1][2]*wp1 - u[i][j][k-1][2]*wm1); rhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 * (u[i][j][k+1][3] - 2.0*u[i][j][k][3] + u[i][j][k-1][3]) + zzcon2*con43 * (wp1 - 2.0*wijk + wm1) - tz2 * (u[i][j][k+1][3]*wp1 - u[i][j][k-1][3]*wm1 + (u[i][j][k+1][4] - square[i][j][k+1] - u[i][j][k-1][4] + square[i][j][k-1]) *c2); rhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 * (u[i][j][k+1][4] - 2.0*u[i][j][k][4] + u[i][j][k-1][4]) + zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + qs[i][j][k-1]) + zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + wm1*wm1) + zzcon5 * (u[i][j][k+1][4]*rho_i[i][j][k+1] - 2.0*u[i][j][k][4]*rho_i[i][j][k] + u[i][j][k-1][4]*rho_i[i][j][k-1]) - tz2 * ( (c1*u[i][j][k+1][4] - c2*square[i][j][k+1])*wp1 - (c1*u[i][j][k-1][4] - c2*square[i][j][k-1])*wm1); } } } /*-------------------------------------------------------------------- c add fourth order zeta-direction dissipation c-------------------------------------------------------------------*/ k = 1; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m]- dssp * ( 5.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + u[i][j][k+2][m]); } } } k = 2; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + u[i][j][k+2][m]); } } } #pragma omp for nowait for (i = 1; i < 
grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 3; k < grid_points[2]-3; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + u[i][j][k+2][m] ); } } } } k = grid_points[2]-3; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] ); } } } k = grid_points[2]-2; #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] + 5.0*u[i][j][k][m] ); } } } #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { for (i = 1; i < grid_points[0]-1; i++) { rhs[i][j][k][m] = rhs[i][j][k][m] * dt; } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void set_constants(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ ce[0][0] = 2.0; ce[0][1] = 0.0; ce[0][2] = 0.0; ce[0][3] = 4.0; ce[0][4] = 5.0; ce[0][5] = 3.0; ce[0][6] = 0.5; ce[0][7] = 0.02; ce[0][8] = 0.01; ce[0][9] = 0.03; ce[0][10] = 0.5; ce[0][11] = 0.4; ce[0][12] = 0.3; ce[1][0] = 1.0; ce[1][1] = 0.0; ce[1][2] = 0.0; ce[1][3] = 0.0; ce[1][4] = 1.0; ce[1][5] = 2.0; ce[1][6] = 3.0; ce[1][7] = 0.01; ce[1][8] = 0.03; ce[1][9] = 0.02; ce[1][10] = 0.4; ce[1][11] = 0.3; ce[1][12] = 0.5; ce[2][0] = 2.0; ce[2][1] = 2.0; ce[2][2] = 0.0; ce[2][3] = 0.0; ce[2][4] = 0.0; ce[2][5] = 2.0; ce[2][6] = 3.0; ce[2][7] = 0.04; ce[2][8] = 0.03; ce[2][9] = 0.05; ce[2][10] = 0.3; ce[2][11] = 0.5; ce[2][12] 
= 0.4; ce[3][0] = 2.0; ce[3][1] = 2.0; ce[3][2] = 0.0; ce[3][3] = 0.0; ce[3][4] = 0.0; ce[3][5] = 2.0; ce[3][6] = 3.0; ce[3][7] = 0.03; ce[3][8] = 0.05; ce[3][9] = 0.04; ce[3][10] = 0.2; ce[3][11] = 0.1; ce[3][12] = 0.3; ce[4][0] = 5.0; ce[4][1] = 4.0; ce[4][2] = 3.0; ce[4][3] = 2.0; ce[4][4] = 0.1; ce[4][5] = 0.4; ce[4][6] = 0.3; ce[4][7] = 0.05; ce[4][8] = 0.04; ce[4][9] = 0.03; ce[4][10] = 0.1; ce[4][11] = 0.3; ce[4][12] = 0.2; c1 = 1.4; c2 = 0.4; c3 = 0.1; c4 = 1.0; c5 = 1.4; dnxm1 = 1.0 / (double)(grid_points[0]-1); dnym1 = 1.0 / (double)(grid_points[1]-1); dnzm1 = 1.0 / (double)(grid_points[2]-1); c1c2 = c1 * c2; c1c5 = c1 * c5; c3c4 = c3 * c4; c1345 = c1c5 * c3c4; conz1 = (1.0-c1c5); tx1 = 1.0 / (dnxm1 * dnxm1); tx2 = 1.0 / (2.0 * dnxm1); tx3 = 1.0 / dnxm1; ty1 = 1.0 / (dnym1 * dnym1); ty2 = 1.0 / (2.0 * dnym1); ty3 = 1.0 / dnym1; tz1 = 1.0 / (dnzm1 * dnzm1); tz2 = 1.0 / (2.0 * dnzm1); tz3 = 1.0 / dnzm1; dx1 = 0.75; dx2 = 0.75; dx3 = 0.75; dx4 = 0.75; dx5 = 0.75; dy1 = 0.75; dy2 = 0.75; dy3 = 0.75; dy4 = 0.75; dy5 = 0.75; dz1 = 1.0; dz2 = 1.0; dz3 = 1.0; dz4 = 1.0; dz5 = 1.0; dxmax = max(dx3, dx4); dymax = max(dy2, dy4); dzmax = max(dz2, dz3); dssp = 0.25 * max(dx1, max(dy1, dz1) ); c4dssp = 4.0 * dssp; c5dssp = 5.0 * dssp; dttx1 = dt*tx1; dttx2 = dt*tx2; dtty1 = dt*ty1; dtty2 = dt*ty2; dttz1 = dt*tz1; dttz2 = dt*tz2; c2dttx1 = 2.0*dttx1; c2dtty1 = 2.0*dtty1; c2dttz1 = 2.0*dttz1; dtdssp = dt*dssp; comz1 = dtdssp; comz4 = 4.0*dtdssp; comz5 = 5.0*dtdssp; comz6 = 6.0*dtdssp; c3c4tx3 = c3c4*tx3; c3c4ty3 = c3c4*ty3; c3c4tz3 = c3c4*tz3; dx1tx1 = dx1*tx1; dx2tx1 = dx2*tx1; dx3tx1 = dx3*tx1; dx4tx1 = dx4*tx1; dx5tx1 = dx5*tx1; dy1ty1 = dy1*ty1; dy2ty1 = dy2*ty1; dy3ty1 = dy3*ty1; dy4ty1 = dy4*ty1; dy5ty1 = dy5*ty1; dz1tz1 = dz1*tz1; dz2tz1 = dz2*tz1; dz3tz1 = dz3*tz1; dz4tz1 = dz4*tz1; dz5tz1 = dz5*tz1; c2iv = 2.5; con43 = 4.0/3.0; con16 = 1.0/6.0; xxcon1 = c3c4tx3*con43*tx3; xxcon2 = c3c4tx3*tx3; xxcon3 = c3c4tx3*conz1*tx3; xxcon4 = c3c4tx3*con16*tx3; xxcon5 = 
c3c4tx3*c1c5*tx3;

  yycon1 = c3c4ty3*con43*ty3;
  yycon2 = c3c4ty3*ty3;
  yycon3 = c3c4ty3*conz1*ty3;
  yycon4 = c3c4ty3*con16*ty3;
  yycon5 = c3c4ty3*c1c5*ty3;

  zzcon1 = c3c4tz3*con43*tz3;
  zzcon2 = c3c4tz3*tz3;
  zzcon3 = c3c4tz3*conz1*tz3;
  zzcon4 = c3c4tz3*con16*tz3;
  zzcon5 = c3c4tz3*c1c5*tz3;
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void verify(int no_time_steps, char *class, boolean *verified) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c  verification routine: compares the RMS norms of the solution error
c  and of the residual against stored reference values for the known
c  problem classes; reports the class through *class ('U' = unknown,
c  i.e. no reference data for this grid/step combination) and a
c  pass/fail flag through *verified.
c-------------------------------------------------------------------*/

  double xcrref[5],xceref[5],xcrdif[5],xcedif[5], 
    epsilon, xce[5], xcr[5], dtref;
  int m;

/*--------------------------------------------------------------------
c   tolerance level
c-------------------------------------------------------------------*/
  epsilon = 1.0e-08;

/*--------------------------------------------------------------------
c   compute the error norm and the residual norm, and exit if not printing
c-------------------------------------------------------------------*/
  error_norm(xce);
  compute_rhs();

  rhs_norm(xcr);

  /* rhs was scaled by dt in compute_rhs' caller convention; undo the
     scaling so the residual norm is comparable to the references */
  for (m = 0; m < 5; m++) {
    xcr[m] = xcr[m] / dt;
  }

  *class = 'U';
  *verified = TRUE;

  for (m = 0; m < 5; m++) {
    xcrref[m] = 1.0;
    xceref[m] = 1.0;
  }

/*--------------------------------------------------------------------
c    reference data for 12X12X12 grids after 60 time steps, with DT = 1.0d-02
c    NOTE(review): this comment originally said "100 time steps", but the
c    condition below tests no_time_steps == 60 -- comment brought in sync
c    with the code.
c-------------------------------------------------------------------*/
  if (grid_points[0] == 12 &&
      grid_points[1] == 12 &&
      grid_points[2] == 12 &&
      no_time_steps == 60) {

    *class = 'S';
    dtref = 1.0e-2;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/ xcrref[0] = 1.7034283709541311e-01; xcrref[1] = 1.2975252070034097e-02; xcrref[2] = 3.2527926989486055e-02; xcrref[3] = 2.6436421275166801e-02; xcrref[4] = 1.9211784131744430e-01; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error. c-------------------------------------------------------------------*/ xceref[0] = 4.9976913345811579e-04; xceref[1] = 4.5195666782961927e-05; xceref[2] = 7.3973765172921357e-05; xceref[3] = 7.3821238632439731e-05; xceref[4] = 8.9269630987491446e-04; /*-------------------------------------------------------------------- c reference data for 24X24X24 grids after 200 time steps, with DT = 0.8d-3 c-------------------------------------------------------------------*/ } else if (grid_points[0] == 24 && grid_points[1] == 24 && grid_points[2] == 24 && no_time_steps == 200) { *class = 'W'; dtref = 0.8e-3; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual. c-------------------------------------------------------------------*/ xcrref[0] = 0.1125590409344e+03; xcrref[1] = 0.1180007595731e+02; xcrref[2] = 0.2710329767846e+02; xcrref[3] = 0.2469174937669e+02; xcrref[4] = 0.2638427874317e+03; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error. 
c-------------------------------------------------------------------*/ xceref[0] = 0.4419655736008e+01; xceref[1] = 0.4638531260002e+00; xceref[2] = 0.1011551749967e+01; xceref[3] = 0.9235878729944e+00; xceref[4] = 0.1018045837718e+02; /*-------------------------------------------------------------------- c reference data for 64X64X64 grids after 200 time steps, with DT = 0.8d-3 c-------------------------------------------------------------------*/ } else if (grid_points[0] == 64 && grid_points[1] == 64 && grid_points[2] == 64 && no_time_steps == 200) { *class = 'A'; dtref = 0.8e-3; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual. c-------------------------------------------------------------------*/ xcrref[0] = 1.0806346714637264e+02; xcrref[1] = 1.1319730901220813e+01; xcrref[2] = 2.5974354511582465e+01; xcrref[3] = 2.3665622544678910e+01; xcrref[4] = 2.5278963211748344e+02; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error. c-------------------------------------------------------------------*/ xceref[0] = 4.2348416040525025e+00; xceref[1] = 4.4390282496995698e-01; xceref[2] = 9.6692480136345650e-01; xceref[3] = 8.8302063039765474e-01; xceref[4] = 9.7379901770829278e+00; /*-------------------------------------------------------------------- c reference data for 102X102X102 grids after 200 time steps, c with DT = 3.0d-04 c-------------------------------------------------------------------*/ } else if (grid_points[0] == 102 && grid_points[1] == 102 && grid_points[2] == 102 && no_time_steps == 200) { *class = 'B'; dtref = 3.0e-4; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual. 
c-------------------------------------------------------------------*/ xcrref[0] = 1.4233597229287254e+03; xcrref[1] = 9.9330522590150238e+01; xcrref[2] = 3.5646025644535285e+02; xcrref[3] = 3.2485447959084092e+02; xcrref[4] = 3.2707541254659363e+03; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error. c-------------------------------------------------------------------*/ xceref[0] = 5.2969847140936856e+01; xceref[1] = 4.4632896115670668e+00; xceref[2] = 1.3122573342210174e+01; xceref[3] = 1.2006925323559144e+01; xceref[4] = 1.2459576151035986e+02; /*-------------------------------------------------------------------- c reference data for 162X162X162 grids after 200 time steps, c with DT = 1.0d-04 c-------------------------------------------------------------------*/ } else if (grid_points[0] == 162 && grid_points[1] == 162 && grid_points[2] == 162 && no_time_steps == 200) { *class = 'C'; dtref = 1.0e-4; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual. c-------------------------------------------------------------------*/ xcrref[0] = 0.62398116551764615e+04; xcrref[1] = 0.50793239190423964e+03; xcrref[2] = 0.15423530093013596e+04; xcrref[3] = 0.13302387929291190e+04; xcrref[4] = 0.11604087428436455e+05; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error. 
c-------------------------------------------------------------------*/ xceref[0] = 0.16462008369091265e+03; xceref[1] = 0.11497107903824313e+02; xceref[2] = 0.41207446207461508e+02; xceref[3] = 0.37087651059694167e+02; xceref[4] = 0.36211053051841265e+03; } else { *verified = FALSE; } /*-------------------------------------------------------------------- c verification test for residuals if gridsize is either 12X12X12 or c 64X64X64 or 102X102X102 or 162X162X162 c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Compute the difference of solution values and the known reference values. c-------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]); xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]); } /*-------------------------------------------------------------------- c Output the comparison of computed results to known cases. 
c-------------------------------------------------------------------*/ if (*class != 'U') { printf(" Verification being performed for class %1c\n", *class); printf(" accuracy setting for epsilon = %20.13e\n", epsilon); if (fabs(dt-dtref) > epsilon) { *verified = FALSE; *class = 'U'; printf(" DT does not match the reference value of %15.8e\n", dtref); } } else { printf(" Unknown class\n"); } if (*class != 'U') { printf(" Comparison of RMS-norms of residual\n"); } else { printf(" RMS-norms of residual\n"); } for (m = 0; m < 5; m++) { if (*class == 'U') { printf(" %2d%20.13e\n", m, xcr[m]); } else if (xcrdif[m] > epsilon) { *verified = FALSE; printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n", m, xcr[m], xcrref[m], xcrdif[m]); } else { printf(" %2d%20.13e%20.13e%20.13e\n", m, xcr[m], xcrref[m], xcrdif[m]); } } if (*class != 'U') { printf(" Comparison of RMS-norms of solution error\n"); } else { printf(" RMS-norms of solution error\n"); } for (m = 0; m < 5; m++) { if (*class == 'U') { printf(" %2d%20.13e\n", m, xce[m]); } else if (xcedif[m] > epsilon) { *verified = FALSE; printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n", m, xce[m], xceref[m], xcedif[m]); } else { printf(" %2d%20.13e%20.13e%20.13e\n", m, xce[m], xceref[m], xcedif[m]); } } if (*class == 'U') { printf(" No reference values provided\n"); printf(" No verification performed\n"); } else if (*verified == TRUE) { printf(" Verification Successful\n"); } else { printf(" Verification failed\n"); } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void x_solve(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c c Performs line solves in X direction by first factoring c the block-tridiagonal matrix into an upper triangular matrix, c and then performing back 
substitution to solve for the unknown
c     vectors of each line.
c     
c     Make sure we treat elements zero to cell_size in the direction
c     of the sweep.
c     
c-------------------------------------------------------------------*/

  lhsx();
  x_solve_cell();
  x_backsubstitute();
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_backsubstitute(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     back solve: if last cell, then generate U(isize)=rhs(isize)
c     else assume U(isize) is loaded in un pack backsub_info
c     so just use it
c     after call u(istart) will be sent to next cell
c-------------------------------------------------------------------*/

  int i, j, k, m, n;

  /* sweep backwards over the i-planes: plane i is corrected with the
     already-solved plane i+1 through the stored C' blocks; the outer
     i-loop is serial (carried dependence), only j is work-shared */
  for (i = grid_points[0]-2; i >= 0; i--) {
#pragma omp for
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
	for (m = 0; m < BLOCK_SIZE; m++) {
	  for (n = 0; n < BLOCK_SIZE; n++) {
	    rhs[i][j][k][m] = rhs[i][j][k][m] 
	      - lhs[i][j][k][CC][m][n]*rhs[i+1][j][k][n];
	  }
	}
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve_cell(void) {
/*--------------------------------------------------------------------
c     performs gaussian elimination on this cell.
c     
c     assumes that unpacking routines for non-first cells
c     preload C' and rhs' from previous cell.
c c assumed send happens outside this routine, but that c c'(IMAX) and rhs'(IMAX) will be sent to next cell c-------------------------------------------------------------------*/ int i,j,k,isize; isize = grid_points[0]-1; /*-------------------------------------------------------------------- c outer most do loops - sweeping in i direction c-------------------------------------------------------------------*/ #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c multiply c(0,j,k) by b_inverse and copy back to c c multiply rhs(0) by b_inverse(0) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[0][j][k][BB], lhs[0][j][k][CC], rhs[0][j][k] ); } } /*-------------------------------------------------------------------- c begin inner most do loop c do all the elements of the cell unless last c-------------------------------------------------------------------*/ for (i = 1; i < isize; i++) { #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c rhs(i) = rhs(i) - A*rhs(i-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][j][k][AA], rhs[i-1][j][k], rhs[i][j][k]); /*-------------------------------------------------------------------- c B(i) = B(i) - C(i-1)*A(i) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][j][k][AA], lhs[i-1][j][k][CC], lhs[i][j][k][BB]); /*-------------------------------------------------------------------- c multiply c(i,j,k) by b_inverse and copy back to c c multiply rhs(1,j,k) by b_inverse(1,j,k) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[i][j][k][BB], lhs[i][j][k][CC], rhs[i][j][k] ); } } } #pragma omp for for (j = 1; j < 
grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {

/*--------------------------------------------------------------------
c     rhs(isize) = rhs(isize) - A*rhs(isize-1)
c-------------------------------------------------------------------*/
      matvec_sub(lhs[isize][j][k][AA],
		 rhs[isize-1][j][k], rhs[isize][j][k]);

/*--------------------------------------------------------------------
c     B(isize) = B(isize) - C(isize-1)*A(isize)
c-------------------------------------------------------------------*/
      matmul_sub(lhs[isize][j][k][AA],
		 lhs[isize-1][j][k][CC],
		 lhs[isize][j][k][BB]);

/*--------------------------------------------------------------------
c     multiply rhs() by b_inverse() and copy to rhs
c     NOTE(review): the original indexed this call with the loop
c     variable i, which equals isize here only because the preceding
c     serial i-loop ran to completion; index with isize explicitly so
c     the last plane is addressed unambiguously (behavior unchanged,
c     and robust against future restructuring of the loops above).
c-------------------------------------------------------------------*/
      binvrhs( lhs[isize][j][k][BB], rhs[isize][j][k] );
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c  subtracts bvec = bvec - ablock*avec, i.e. a 5x5 block
c  matrix-vector product subtracted in place from bvec
c-------------------------------------------------------------------*/

  int i;

  for (i = 0; i < 5; i++) {
/*--------------------------------------------------------------------
c     row i of the block times avec, fully unrolled
c-------------------------------------------------------------------*/
    bvec[i] = bvec[i] - ablock[i][0]*avec[0]
      - ablock[i][1]*avec[1]
      - ablock[i][2]*avec[2]
      - ablock[i][3]*avec[3]
      - ablock[i][4]*avec[4];
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void matmul_sub(double ablock[5][5], double bblock[5][5],
		       double cblock[5][5]) {
/*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c subtracts a(i,j,k) X b(i,j,k) from c(i,j,k) c-------------------------------------------------------------------*/ int j; for (j = 0; j < 5; j++) { cblock[0][j] = cblock[0][j] - ablock[0][0]*bblock[0][j] - ablock[0][1]*bblock[1][j] - ablock[0][2]*bblock[2][j] - ablock[0][3]*bblock[3][j] - ablock[0][4]*bblock[4][j]; cblock[1][j] = cblock[1][j] - ablock[1][0]*bblock[0][j] - ablock[1][1]*bblock[1][j] - ablock[1][2]*bblock[2][j] - ablock[1][3]*bblock[3][j] - ablock[1][4]*bblock[4][j]; cblock[2][j] = cblock[2][j] - ablock[2][0]*bblock[0][j] - ablock[2][1]*bblock[1][j] - ablock[2][2]*bblock[2][j] - ablock[2][3]*bblock[3][j] - ablock[2][4]*bblock[4][j]; cblock[3][j] = cblock[3][j] - ablock[3][0]*bblock[0][j] - ablock[3][1]*bblock[1][j] - ablock[3][2]*bblock[2][j] - ablock[3][3]*bblock[3][j] - ablock[3][4]*bblock[4][j]; cblock[4][j] = cblock[4][j] - ablock[4][0]*bblock[0][j] - ablock[4][1]*bblock[1][j] - ablock[4][2]*bblock[2][j] - ablock[4][3]*bblock[3][j] - ablock[4][4]*bblock[4][j]; } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ double pivot, coeff; /*-------------------------------------------------------------------- c c-------------------------------------------------------------------*/ pivot = 1.00/lhs[0][0]; lhs[0][1] = lhs[0][1]*pivot; lhs[0][2] = lhs[0][2]*pivot; lhs[0][3] = lhs[0][3]*pivot; lhs[0][4] = lhs[0][4]*pivot; c[0][0] = c[0][0]*pivot; c[0][1] = c[0][1]*pivot; c[0][2] = c[0][2]*pivot; c[0][3] = c[0][3]*pivot; c[0][4] = c[0][4]*pivot; 
r[0] = r[0] *pivot; coeff = lhs[1][0]; lhs[1][1]= lhs[1][1] - coeff*lhs[0][1]; lhs[1][2]= lhs[1][2] - coeff*lhs[0][2]; lhs[1][3]= lhs[1][3] - coeff*lhs[0][3]; lhs[1][4]= lhs[1][4] - coeff*lhs[0][4]; c[1][0] = c[1][0] - coeff*c[0][0]; c[1][1] = c[1][1] - coeff*c[0][1]; c[1][2] = c[1][2] - coeff*c[0][2]; c[1][3] = c[1][3] - coeff*c[0][3]; c[1][4] = c[1][4] - coeff*c[0][4]; r[1] = r[1] - coeff*r[0]; coeff = lhs[2][0]; lhs[2][1]= lhs[2][1] - coeff*lhs[0][1]; lhs[2][2]= lhs[2][2] - coeff*lhs[0][2]; lhs[2][3]= lhs[2][3] - coeff*lhs[0][3]; lhs[2][4]= lhs[2][4] - coeff*lhs[0][4]; c[2][0] = c[2][0] - coeff*c[0][0]; c[2][1] = c[2][1] - coeff*c[0][1]; c[2][2] = c[2][2] - coeff*c[0][2]; c[2][3] = c[2][3] - coeff*c[0][3]; c[2][4] = c[2][4] - coeff*c[0][4]; r[2] = r[2] - coeff*r[0]; coeff = lhs[3][0]; lhs[3][1]= lhs[3][1] - coeff*lhs[0][1]; lhs[3][2]= lhs[3][2] - coeff*lhs[0][2]; lhs[3][3]= lhs[3][3] - coeff*lhs[0][3]; lhs[3][4]= lhs[3][4] - coeff*lhs[0][4]; c[3][0] = c[3][0] - coeff*c[0][0]; c[3][1] = c[3][1] - coeff*c[0][1]; c[3][2] = c[3][2] - coeff*c[0][2]; c[3][3] = c[3][3] - coeff*c[0][3]; c[3][4] = c[3][4] - coeff*c[0][4]; r[3] = r[3] - coeff*r[0]; coeff = lhs[4][0]; lhs[4][1]= lhs[4][1] - coeff*lhs[0][1]; lhs[4][2]= lhs[4][2] - coeff*lhs[0][2]; lhs[4][3]= lhs[4][3] - coeff*lhs[0][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[0][4]; c[4][0] = c[4][0] - coeff*c[0][0]; c[4][1] = c[4][1] - coeff*c[0][1]; c[4][2] = c[4][2] - coeff*c[0][2]; c[4][3] = c[4][3] - coeff*c[0][3]; c[4][4] = c[4][4] - coeff*c[0][4]; r[4] = r[4] - coeff*r[0]; pivot = 1.00/lhs[1][1]; lhs[1][2] = lhs[1][2]*pivot; lhs[1][3] = lhs[1][3]*pivot; lhs[1][4] = lhs[1][4]*pivot; c[1][0] = c[1][0]*pivot; c[1][1] = c[1][1]*pivot; c[1][2] = c[1][2]*pivot; c[1][3] = c[1][3]*pivot; c[1][4] = c[1][4]*pivot; r[1] = r[1] *pivot; coeff = lhs[0][1]; lhs[0][2]= lhs[0][2] - coeff*lhs[1][2]; lhs[0][3]= lhs[0][3] - coeff*lhs[1][3]; lhs[0][4]= lhs[0][4] - coeff*lhs[1][4]; c[0][0] = c[0][0] - coeff*c[1][0]; c[0][1] = c[0][1] - 
coeff*c[1][1]; c[0][2] = c[0][2] - coeff*c[1][2]; c[0][3] = c[0][3] - coeff*c[1][3]; c[0][4] = c[0][4] - coeff*c[1][4]; r[0] = r[0] - coeff*r[1]; coeff = lhs[2][1]; lhs[2][2]= lhs[2][2] - coeff*lhs[1][2]; lhs[2][3]= lhs[2][3] - coeff*lhs[1][3]; lhs[2][4]= lhs[2][4] - coeff*lhs[1][4]; c[2][0] = c[2][0] - coeff*c[1][0]; c[2][1] = c[2][1] - coeff*c[1][1]; c[2][2] = c[2][2] - coeff*c[1][2]; c[2][3] = c[2][3] - coeff*c[1][3]; c[2][4] = c[2][4] - coeff*c[1][4]; r[2] = r[2] - coeff*r[1]; coeff = lhs[3][1]; lhs[3][2]= lhs[3][2] - coeff*lhs[1][2]; lhs[3][3]= lhs[3][3] - coeff*lhs[1][3]; lhs[3][4]= lhs[3][4] - coeff*lhs[1][4]; c[3][0] = c[3][0] - coeff*c[1][0]; c[3][1] = c[3][1] - coeff*c[1][1]; c[3][2] = c[3][2] - coeff*c[1][2]; c[3][3] = c[3][3] - coeff*c[1][3]; c[3][4] = c[3][4] - coeff*c[1][4]; r[3] = r[3] - coeff*r[1]; coeff = lhs[4][1]; lhs[4][2]= lhs[4][2] - coeff*lhs[1][2]; lhs[4][3]= lhs[4][3] - coeff*lhs[1][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[1][4]; c[4][0] = c[4][0] - coeff*c[1][0]; c[4][1] = c[4][1] - coeff*c[1][1]; c[4][2] = c[4][2] - coeff*c[1][2]; c[4][3] = c[4][3] - coeff*c[1][3]; c[4][4] = c[4][4] - coeff*c[1][4]; r[4] = r[4] - coeff*r[1]; pivot = 1.00/lhs[2][2]; lhs[2][3] = lhs[2][3]*pivot; lhs[2][4] = lhs[2][4]*pivot; c[2][0] = c[2][0]*pivot; c[2][1] = c[2][1]*pivot; c[2][2] = c[2][2]*pivot; c[2][3] = c[2][3]*pivot; c[2][4] = c[2][4]*pivot; r[2] = r[2] *pivot; coeff = lhs[0][2]; lhs[0][3]= lhs[0][3] - coeff*lhs[2][3]; lhs[0][4]= lhs[0][4] - coeff*lhs[2][4]; c[0][0] = c[0][0] - coeff*c[2][0]; c[0][1] = c[0][1] - coeff*c[2][1]; c[0][2] = c[0][2] - coeff*c[2][2]; c[0][3] = c[0][3] - coeff*c[2][3]; c[0][4] = c[0][4] - coeff*c[2][4]; r[0] = r[0] - coeff*r[2]; coeff = lhs[1][2]; lhs[1][3]= lhs[1][3] - coeff*lhs[2][3]; lhs[1][4]= lhs[1][4] - coeff*lhs[2][4]; c[1][0] = c[1][0] - coeff*c[2][0]; c[1][1] = c[1][1] - coeff*c[2][1]; c[1][2] = c[1][2] - coeff*c[2][2]; c[1][3] = c[1][3] - coeff*c[2][3]; c[1][4] = c[1][4] - coeff*c[2][4]; r[1] = r[1] - coeff*r[2]; coeff 
= lhs[3][2]; lhs[3][3]= lhs[3][3] - coeff*lhs[2][3]; lhs[3][4]= lhs[3][4] - coeff*lhs[2][4]; c[3][0] = c[3][0] - coeff*c[2][0]; c[3][1] = c[3][1] - coeff*c[2][1]; c[3][2] = c[3][2] - coeff*c[2][2]; c[3][3] = c[3][3] - coeff*c[2][3]; c[3][4] = c[3][4] - coeff*c[2][4]; r[3] = r[3] - coeff*r[2]; coeff = lhs[4][2]; lhs[4][3]= lhs[4][3] - coeff*lhs[2][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[2][4]; c[4][0] = c[4][0] - coeff*c[2][0]; c[4][1] = c[4][1] - coeff*c[2][1]; c[4][2] = c[4][2] - coeff*c[2][2]; c[4][3] = c[4][3] - coeff*c[2][3]; c[4][4] = c[4][4] - coeff*c[2][4]; r[4] = r[4] - coeff*r[2]; pivot = 1.00/lhs[3][3]; lhs[3][4] = lhs[3][4]*pivot; c[3][0] = c[3][0]*pivot; c[3][1] = c[3][1]*pivot; c[3][2] = c[3][2]*pivot; c[3][3] = c[3][3]*pivot; c[3][4] = c[3][4]*pivot; r[3] = r[3] *pivot; coeff = lhs[0][3]; lhs[0][4]= lhs[0][4] - coeff*lhs[3][4]; c[0][0] = c[0][0] - coeff*c[3][0]; c[0][1] = c[0][1] - coeff*c[3][1]; c[0][2] = c[0][2] - coeff*c[3][2]; c[0][3] = c[0][3] - coeff*c[3][3]; c[0][4] = c[0][4] - coeff*c[3][4]; r[0] = r[0] - coeff*r[3]; coeff = lhs[1][3]; lhs[1][4]= lhs[1][4] - coeff*lhs[3][4]; c[1][0] = c[1][0] - coeff*c[3][0]; c[1][1] = c[1][1] - coeff*c[3][1]; c[1][2] = c[1][2] - coeff*c[3][2]; c[1][3] = c[1][3] - coeff*c[3][3]; c[1][4] = c[1][4] - coeff*c[3][4]; r[1] = r[1] - coeff*r[3]; coeff = lhs[2][3]; lhs[2][4]= lhs[2][4] - coeff*lhs[3][4]; c[2][0] = c[2][0] - coeff*c[3][0]; c[2][1] = c[2][1] - coeff*c[3][1]; c[2][2] = c[2][2] - coeff*c[3][2]; c[2][3] = c[2][3] - coeff*c[3][3]; c[2][4] = c[2][4] - coeff*c[3][4]; r[2] = r[2] - coeff*r[3]; coeff = lhs[4][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[3][4]; c[4][0] = c[4][0] - coeff*c[3][0]; c[4][1] = c[4][1] - coeff*c[3][1]; c[4][2] = c[4][2] - coeff*c[3][2]; c[4][3] = c[4][3] - coeff*c[3][3]; c[4][4] = c[4][4] - coeff*c[3][4]; r[4] = r[4] - coeff*r[3]; pivot = 1.00/lhs[4][4]; c[4][0] = c[4][0]*pivot; c[4][1] = c[4][1]*pivot; c[4][2] = c[4][2]*pivot; c[4][3] = c[4][3]*pivot; c[4][4] = c[4][4]*pivot; r[4] = r[4] 
*pivot; coeff = lhs[0][4]; c[0][0] = c[0][0] - coeff*c[4][0]; c[0][1] = c[0][1] - coeff*c[4][1]; c[0][2] = c[0][2] - coeff*c[4][2]; c[0][3] = c[0][3] - coeff*c[4][3]; c[0][4] = c[0][4] - coeff*c[4][4]; r[0] = r[0] - coeff*r[4]; coeff = lhs[1][4]; c[1][0] = c[1][0] - coeff*c[4][0]; c[1][1] = c[1][1] - coeff*c[4][1]; c[1][2] = c[1][2] - coeff*c[4][2]; c[1][3] = c[1][3] - coeff*c[4][3]; c[1][4] = c[1][4] - coeff*c[4][4]; r[1] = r[1] - coeff*r[4]; coeff = lhs[2][4]; c[2][0] = c[2][0] - coeff*c[4][0]; c[2][1] = c[2][1] - coeff*c[4][1]; c[2][2] = c[2][2] - coeff*c[4][2]; c[2][3] = c[2][3] - coeff*c[4][3]; c[2][4] = c[2][4] - coeff*c[4][4]; r[2] = r[2] - coeff*r[4]; coeff = lhs[3][4]; c[3][0] = c[3][0] - coeff*c[4][0]; c[3][1] = c[3][1] - coeff*c[4][1]; c[3][2] = c[3][2] - coeff*c[4][2]; c[3][3] = c[3][3] - coeff*c[4][3]; c[3][4] = c[3][4] - coeff*c[4][4]; r[3] = r[3] - coeff*r[4]; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void binvrhs( double lhs[5][5], double r[5] ) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ double pivot, coeff; /*-------------------------------------------------------------------- c c-------------------------------------------------------------------*/ pivot = 1.00/lhs[0][0]; lhs[0][1] = lhs[0][1]*pivot; lhs[0][2] = lhs[0][2]*pivot; lhs[0][3] = lhs[0][3]*pivot; lhs[0][4] = lhs[0][4]*pivot; r[0] = r[0] *pivot; coeff = lhs[1][0]; lhs[1][1]= lhs[1][1] - coeff*lhs[0][1]; lhs[1][2]= lhs[1][2] - coeff*lhs[0][2]; lhs[1][3]= lhs[1][3] - coeff*lhs[0][3]; lhs[1][4]= lhs[1][4] - coeff*lhs[0][4]; r[1] = r[1] - coeff*r[0]; coeff = lhs[2][0]; lhs[2][1]= lhs[2][1] - coeff*lhs[0][1]; lhs[2][2]= lhs[2][2] - coeff*lhs[0][2]; lhs[2][3]= lhs[2][3] - coeff*lhs[0][3]; lhs[2][4]= lhs[2][4] - coeff*lhs[0][4]; r[2] = r[2] - coeff*r[0]; coeff = lhs[3][0]; lhs[3][1]= 
lhs[3][1] - coeff*lhs[0][1]; lhs[3][2]= lhs[3][2] - coeff*lhs[0][2]; lhs[3][3]= lhs[3][3] - coeff*lhs[0][3]; lhs[3][4]= lhs[3][4] - coeff*lhs[0][4]; r[3] = r[3] - coeff*r[0]; coeff = lhs[4][0]; lhs[4][1]= lhs[4][1] - coeff*lhs[0][1]; lhs[4][2]= lhs[4][2] - coeff*lhs[0][2]; lhs[4][3]= lhs[4][3] - coeff*lhs[0][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[0][4]; r[4] = r[4] - coeff*r[0]; pivot = 1.00/lhs[1][1]; lhs[1][2] = lhs[1][2]*pivot; lhs[1][3] = lhs[1][3]*pivot; lhs[1][4] = lhs[1][4]*pivot; r[1] = r[1] *pivot; coeff = lhs[0][1]; lhs[0][2]= lhs[0][2] - coeff*lhs[1][2]; lhs[0][3]= lhs[0][3] - coeff*lhs[1][3]; lhs[0][4]= lhs[0][4] - coeff*lhs[1][4]; r[0] = r[0] - coeff*r[1]; coeff = lhs[2][1]; lhs[2][2]= lhs[2][2] - coeff*lhs[1][2]; lhs[2][3]= lhs[2][3] - coeff*lhs[1][3]; lhs[2][4]= lhs[2][4] - coeff*lhs[1][4]; r[2] = r[2] - coeff*r[1]; coeff = lhs[3][1]; lhs[3][2]= lhs[3][2] - coeff*lhs[1][2]; lhs[3][3]= lhs[3][3] - coeff*lhs[1][3]; lhs[3][4]= lhs[3][4] - coeff*lhs[1][4]; r[3] = r[3] - coeff*r[1]; coeff = lhs[4][1]; lhs[4][2]= lhs[4][2] - coeff*lhs[1][2]; lhs[4][3]= lhs[4][3] - coeff*lhs[1][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[1][4]; r[4] = r[4] - coeff*r[1]; pivot = 1.00/lhs[2][2]; lhs[2][3] = lhs[2][3]*pivot; lhs[2][4] = lhs[2][4]*pivot; r[2] = r[2] *pivot; coeff = lhs[0][2]; lhs[0][3]= lhs[0][3] - coeff*lhs[2][3]; lhs[0][4]= lhs[0][4] - coeff*lhs[2][4]; r[0] = r[0] - coeff*r[2]; coeff = lhs[1][2]; lhs[1][3]= lhs[1][3] - coeff*lhs[2][3]; lhs[1][4]= lhs[1][4] - coeff*lhs[2][4]; r[1] = r[1] - coeff*r[2]; coeff = lhs[3][2]; lhs[3][3]= lhs[3][3] - coeff*lhs[2][3]; lhs[3][4]= lhs[3][4] - coeff*lhs[2][4]; r[3] = r[3] - coeff*r[2]; coeff = lhs[4][2]; lhs[4][3]= lhs[4][3] - coeff*lhs[2][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[2][4]; r[4] = r[4] - coeff*r[2]; pivot = 1.00/lhs[3][3]; lhs[3][4] = lhs[3][4]*pivot; r[3] = r[3] *pivot; coeff = lhs[0][3]; lhs[0][4]= lhs[0][4] - coeff*lhs[3][4]; r[0] = r[0] - coeff*r[3]; coeff = lhs[1][3]; lhs[1][4]= lhs[1][4] - coeff*lhs[3][4]; r[1] = 
r[1] - coeff*r[3]; coeff = lhs[2][3]; lhs[2][4]= lhs[2][4] - coeff*lhs[3][4]; r[2] = r[2] - coeff*r[3]; coeff = lhs[4][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[3][4]; r[4] = r[4] - coeff*r[3]; pivot = 1.00/lhs[4][4]; r[4] = r[4] *pivot; coeff = lhs[0][4]; r[0] = r[0] - coeff*r[4]; coeff = lhs[1][4]; r[1] = r[1] - coeff*r[4]; coeff = lhs[2][4]; r[2] = r[2] - coeff*r[4]; coeff = lhs[3][4]; r[3] = r[3] - coeff*r[4]; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void y_solve(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Performs line solves in Y direction by first factoring c the block-tridiagonal matrix into an upper triangular matrix][ c and then performing back substitution to solve for the unknow c vectors of each line. c c Make sure we treat elements zero to cell_size in the direction c of the sweep. 
c-------------------------------------------------------------------*/

  lhsy();
  y_solve_cell();
  y_backsubstitute();
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void y_backsubstitute(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c back solve: if last cell, then generate U(jsize)=rhs(jsize)
c else assume U(jsize) is loaded in un pack backsub_info
c so just use it
c after call u(jstart) will be sent to next cell
c-------------------------------------------------------------------*/

  int i, j, k, m, n;

  /* Sweep j from the last interior plane back to 0; each plane's rhs
     is corrected with the CC coupling to the already-solved plane
     j+1.  The j loop is sequential (carried dependence); only the
     i loop is workshared. */
  for (j = grid_points[1]-2; j >= 0; j--) {
#pragma omp for
    for (i = 1; i < grid_points[0]-1; i++) {
      for (k = 1; k < grid_points[2]-1; k++) {
	for (m = 0; m < BLOCK_SIZE; m++) {
	  for (n = 0; n < BLOCK_SIZE; n++) {
	    rhs[i][j][k][m] = rhs[i][j][k][m]
	      - lhs[i][j][k][CC][m][n]*rhs[i][j+1][k][n];
	  }
	}
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void y_solve_cell(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c performs Gaussian elimination on this cell.
c
c assumes that unpacking routines for non-first cells
c preload C' and rhs' from previous cell.
c
c assumed send happens outside this routine, but that
c c'(JMAX) and rhs'(JMAX) will be sent to next cell
c-------------------------------------------------------------------*/

  int i, j, k, jsize;

  jsize = grid_points[1]-1;

/*--------------------------------------------------------------------
c multiply c(i,0,k) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      binvcrhs( lhs[i][0][k][BB],
		lhs[i][0][k][CC],
		rhs[i][0][k] );
    }
  }

/*--------------------------------------------------------------------
c begin inner most do loop
c do all the elements of the cell unless last
c (forward elimination; the j loop is sequential by dependence)
c-------------------------------------------------------------------*/
  for (j = 1; j < jsize; j++) {
#pragma omp for
    for (i = 1; i < grid_points[0]-1; i++) {
      for (k = 1; k < grid_points[2]-1; k++) {

	/* rhs(j) = rhs(j) - A*rhs(j-1) */
	matvec_sub(lhs[i][j][k][AA],
		   rhs[i][j-1][k], rhs[i][j][k]);

	/* B(j) = B(j) - C(j-1)*A(j) */
	matmul_sub(lhs[i][j][k][AA],
		   lhs[i][j-1][k][CC],
		   lhs[i][j][k][BB]);

	/* multiply c(i,j,k) by b_inverse and copy back to c;
	   multiply rhs by b_inverse and copy back to rhs */
	binvcrhs( lhs[i][j][k][BB],
		  lhs[i][j][k][CC],
		  rhs[i][j][k] );
      }
    }
  }

/*--------------------------------------------------------------------
c rhs(jsize) = rhs(jsize) - A*rhs(jsize-1)  (last-cell special case)
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      matvec_sub(lhs[i][jsize][k][AA],
		 rhs[i][jsize-1][k], rhs[i][jsize][k]);

      /* B(jsize) = B(jsize) - C(jsize-1)*A(jsize) */
      matmul_sub(lhs[i][jsize][k][AA],
		 lhs[i][jsize-1][k][CC],
		 lhs[i][jsize][k][BB]);

      /* multiply rhs(jsize) by b_inverse(jsize) and copy to rhs */
      binvrhs( lhs[i][jsize][k][BB],
	       rhs[i][jsize][k] );
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void z_solve(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c Performs line solves in Z direction by first factoring
c the block-tridiagonal matrix into an upper triangular matrix,
c and then performing back substitution to solve for the unknown
c vectors of each line.
c
c Make sure we treat elements zero to cell_size in the direction
c of the sweep.
c-------------------------------------------------------------------*/

  lhsz();
  z_solve_cell();
  z_backsubstitute();
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void z_backsubstitute(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c back solve: if last cell, then generate U(ksize)=rhs(ksize)
c else assume U(ksize) is loaded in un pack backsub_info
c so just use it
c after call u(kstart) will be sent to next cell
c-------------------------------------------------------------------*/

  int i, j, k, m, n;

#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      /* Sweep k backwards; plane k is corrected with the CC coupling
	 to the already-solved plane k+1. */
      for (k = grid_points[2]-2; k >= 0; k--) {
	for (m = 0; m < BLOCK_SIZE; m++) {
	  for (n = 0; n < BLOCK_SIZE; n++) {
	    rhs[i][j][k][m] = rhs[i][j][k][m]
	      - lhs[i][j][k][CC][m][n]*rhs[i][j][k+1][n];
	  }
	}
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void z_solve_cell(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c performs Gaussian elimination on this cell.
c
c assumes that unpacking routines for non-first cells
c preload C' and rhs' from previous cell.
c
c assumed send happens outside this routine, but that
c c'(KMAX) and rhs'(KMAX) will be sent to next cell.
c-------------------------------------------------------------------*/

  int i,j,k,ksize;

  ksize = grid_points[2]-1;

/*--------------------------------------------------------------------
c outer most do loops - sweeping in i direction
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {

      /* multiply c(i,j,0) by b_inverse and copy back to c;
	 multiply rhs(0) by b_inverse(0) and copy to rhs */
      binvcrhs( lhs[i][j][0][BB],
		lhs[i][j][0][CC],
		rhs[i][j][0] );
    }
  }

/*--------------------------------------------------------------------
c begin inner most do loop
c do all the elements of the cell unless last
c (forward elimination; the k loop is sequential by dependence)
c-------------------------------------------------------------------*/
  for (k = 1; k < ksize; k++) {
#pragma omp for
    for (i = 1; i < grid_points[0]-1; i++) {
      for (j = 1; j < grid_points[1]-1; j++) {

	/* rhs(k) = rhs(k) - A*rhs(k-1) */
	matvec_sub(lhs[i][j][k][AA],
		   rhs[i][j][k-1], rhs[i][j][k]);

	/* B(k) = B(k) - C(k-1)*A(k) */
	matmul_sub(lhs[i][j][k][AA],
		   lhs[i][j][k-1][CC],
		   lhs[i][j][k][BB]);

	/* multiply c(i,j,k) by b_inverse and copy back to c;
	   multiply rhs by b_inverse and copy back to rhs */
	binvcrhs( lhs[i][j][k][BB],
		  lhs[i][j][k][CC],
		  rhs[i][j][k] );
      }
    }
  }
/*-------------------------------------------------------------------- c Now finish up special cases for last cell c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { /*-------------------------------------------------------------------- c rhs(ksize) = rhs(ksize) - A*rhs(ksize-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][j][ksize][AA], rhs[i][j][ksize-1], rhs[i][j][ksize]); /*-------------------------------------------------------------------- c B(ksize) = B(ksize) - C(ksize-1)*A(ksize) c call matmul_sub(aa,i,j,ksize,c, c $ cc,i,j,ksize-1,c,BB,i,j,ksize) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][j][ksize][AA], lhs[i][j][ksize-1][CC], lhs[i][j][ksize][BB]); /*-------------------------------------------------------------------- c multiply rhs(ksize) by b_inverse(ksize) and copy to rhs c-------------------------------------------------------------------*/ binvrhs( lhs[i][j][ksize][BB], rhs[i][j][ksize] ); } } }
OMPIRBuilder.h
//===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the OpenMPIRBuilder class and helpers used as a convenient // way to create LLVM instructions for OpenMP directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H #define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/IR/DebugLoc.h" #include "llvm/IR/IRBuilder.h" #include "llvm/Support/Allocator.h" #include <forward_list> namespace llvm { class CanonicalLoopInfo; /// An interface to create LLVM-IR for OpenMP directives. /// /// Each OpenMP directive has a corresponding public generator method. class OpenMPIRBuilder { public: /// Create a new OpenMPIRBuilder operating on the given module \p M. This will /// not have an effect on \p M (see initialize). OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {} ~OpenMPIRBuilder(); /// Initialize the internal state, this will put structures types and /// potentially other helpers into the underlying module. Must be called /// before any other method and only once! void initialize(); /// Finalize the underlying module, e.g., by outlining regions. /// \param Fn The function to be finalized. If not used, /// all functions are finalized. void finalize(Function *Fn = nullptr); /// Add attributes known for \p FnID to \p Fn. void addAttributes(omp::RuntimeFunction FnID, Function &Fn); /// Type used throughout for insertion points. using InsertPointTy = IRBuilder<>::InsertPoint; /// Callback type for variable finalization (think destructors). 
/// /// \param CodeGenIP is the insertion point at which the finalization code /// should be placed. /// /// A finalize callback knows about all objects that need finalization, e.g. /// destruction, when the scope of the currently generated construct is left /// at the time, and location, the callback is invoked. using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>; struct FinalizationInfo { /// The finalization callback provided by the last in-flight invocation of /// createXXXX for the directive of kind DK. FinalizeCallbackTy FiniCB; /// The directive kind of the innermost directive that has an associated /// region which might require finalization when it is left. omp::Directive DK; /// Flag to indicate if the directive is cancellable. bool IsCancellable; }; /// Push a finalization callback on the finalization stack. /// /// NOTE: Temporary solution until Clang CG is gone. void pushFinalizationCB(const FinalizationInfo &FI) { FinalizationStack.push_back(FI); } /// Pop the last finalization callback from the finalization stack. /// /// NOTE: Temporary solution until Clang CG is gone. void popFinalizationCB() { FinalizationStack.pop_back(); } /// Callback type for body (=inner region) code generation /// /// The callback takes code locations as arguments, each describing a /// location at which code might need to be generated or a location that is /// the target of control transfer. /// /// \param AllocaIP is the insertion point at which new alloca instructions /// should be placed. /// \param CodeGenIP is the insertion point at which the body code should be /// placed. /// \param ContinuationBB is the basic block target to leave the body. /// /// Note that all blocks pointed to by the arguments have terminators. 
using BodyGenCallbackTy = function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, BasicBlock &ContinuationBB)>; // This is created primarily for sections construct as llvm::function_ref // (BodyGenCallbackTy) is not storable (as described in the comments of // function_ref class - function_ref contains non-ownable reference // to the callable. using StorableBodyGenCallbackTy = std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, BasicBlock &ContinuationBB)>; /// Callback type for loop body code generation. /// /// \param CodeGenIP is the insertion point where the loop's body code must be /// placed. This will be a dedicated BasicBlock with a /// conditional branch from the loop condition check and /// terminated with an unconditional branch to the loop /// latch. /// \param IndVar is the induction variable usable at the insertion point. using LoopBodyGenCallbackTy = function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>; /// Callback type for variable privatization (think copy & default /// constructor). /// /// \param AllocaIP is the insertion point at which new alloca instructions /// should be placed. /// \param CodeGenIP is the insertion point at which the privatization code /// should be placed. /// \param Original The value being copied/created, should not be used in the /// generated IR. /// \param Inner The equivalent of \p Original that should be used in the /// generated IR; this is equal to \p Original if the value is /// a pointer and can thus be passed directly, otherwise it is /// an equivalent but different value. /// \param ReplVal The replacement value, thus a copy or new created version /// of \p Inner. /// /// \returns The new insertion point where code generation continues and /// \p ReplVal the replacement value. 
using PrivatizeCallbackTy = function_ref<InsertPointTy( InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original, Value &Inner, Value *&ReplVal)>; /// Description of a LLVM-IR insertion point (IP) and a debug/source location /// (filename, line, column, ...). struct LocationDescription { template <typename T, typename U> LocationDescription(const IRBuilder<T, U> &IRB) : IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {} LocationDescription(const InsertPointTy &IP) : IP(IP) {} LocationDescription(const InsertPointTy &IP, const DebugLoc &DL) : IP(IP), DL(DL) {} InsertPointTy IP; DebugLoc DL; }; /// Emitter methods for OpenMP directives. /// ///{ /// Generator for '#omp barrier' /// /// \param Loc The location where the barrier directive was encountered. /// \param DK The kind of directive that caused the barrier. /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier. /// \param CheckCancelFlag Flag to indicate a cancel barrier return value /// should be checked and acted upon. /// /// \returns The insertion point after the barrier. InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK, bool ForceSimpleCall = false, bool CheckCancelFlag = true); /// Generator for '#omp cancel' /// /// \param Loc The location where the directive was encountered. /// \param IfCondition The evaluated 'if' clause expression, if any. /// \param CanceledDirective The kind of directive that is cancled. /// /// \returns The insertion point after the barrier. InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition, omp::Directive CanceledDirective); /// Generator for '#omp parallel' /// /// \param Loc The insert and source location description. /// \param AllocaIP The insertion points to be used for alloca instructions. /// \param BodyGenCB Callback that will generate the region code. /// \param PrivCB Callback to copy a given variable (think copy constructor). 
/// \param FiniCB Callback to finalize variable copies. /// \param IfCondition The evaluated 'if' clause expression, if any. /// \param NumThreads The evaluated 'num_threads' clause expression, if any. /// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind). /// \param IsCancellable Flag to indicate a cancellable parallel region. /// /// \returns The insertion position *after* the parallel. IRBuilder<>::InsertPoint createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads, omp::ProcBindKind ProcBind, bool IsCancellable); /// Generator for the control flow structure of an OpenMP canonical loop. /// /// This generator operates on the logical iteration space of the loop, i.e. /// the caller only has to provide a loop trip count of the loop as defined by /// base language semantics. The trip count is interpreted as an unsigned /// integer. The induction variable passed to \p BodyGenCB will be of the same /// type and run from 0 to \p TripCount - 1. It is up to the callback to /// convert the logical iteration variable to the loop counter variable in the /// loop body. /// /// \param Loc The insert and source location description. The insert /// location can be between two instructions or the end of a /// degenerate block (e.g. a BB under construction). /// \param BodyGenCB Callback that will generate the loop body code. /// \param TripCount Number of iterations the loop body is executed. /// \param Name Base name used to derive BB and instruction names. /// /// \returns An object representing the created control flow structure which /// can be used for loop-associated directives. CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *TripCount, const Twine &Name = "loop"); /// Generator for the control flow structure of an OpenMP canonical loop. 
/// /// Instead of a logical iteration space, this allows specifying user-defined /// loop counter values using increment, upper- and lower bounds. To /// disambiguate the terminology when counting downwards, instead of lower /// bounds we use \p Start for the loop counter value in the first body /// iteration. /// /// Consider the following limitations: /// /// * A loop counter space over all integer values of its bit-width cannot be /// represented. E.g using uint8_t, its loop trip count of 256 cannot be /// stored into an 8 bit integer): /// /// DO I = 0, 255, 1 /// /// * Unsigned wrapping is only supported when wrapping only "once"; E.g. /// effectively counting downwards: /// /// for (uint8_t i = 100u; i > 0; i += 127u) /// /// /// TODO: May need to add additional parameters to represent: /// /// * Allow representing downcounting with unsigned integers. /// /// * Sign of the step and the comparison operator might disagree: /// /// for (int i = 0; i < 42; i -= 1u) /// // /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the loop body code. /// \param Start Value of the loop counter for the first iterations. /// \param Stop Loop counter values past this will stop the loop. /// \param Step Loop counter increment after each iteration; negative /// means counting down. /// \param IsSigned Whether Start, Stop and Step are signed integers. /// \param InclusiveStop Whether \p Stop itself is a valid value for the loop /// counter. /// \param ComputeIP Insertion point for instructions computing the trip /// count. Can be used to ensure the trip count is available /// at the outermost loop of a loop nest. If not set, /// defaults to the preheader of the generated loop. /// \param Name Base name used to derive BB and instruction names. /// /// \returns An object representing the created control flow structure which /// can be used for loop-associated directives. 
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop, InsertPointTy ComputeIP = {}, const Twine &Name = "loop"); /// Collapse a loop nest into a single loop. /// /// Merges loops of a loop nest into a single CanonicalLoopNest representation /// that has the same number of innermost loop iterations as the origin loop /// nest. The induction variables of the input loops are derived from the /// collapsed loop's induction variable. This is intended to be used to /// implement OpenMP's collapse clause. Before applying a directive, /// collapseLoops normalizes a loop nest to contain only a single loop and the /// directive's implementation does not need to handle multiple loops itself. /// This does not remove the need to handle all loop nest handling by /// directives, such as the ordered(<n>) clause or the simd schedule-clause /// modifier of the worksharing-loop directive. /// /// Example: /// \code /// for (int i = 0; i < 7; ++i) // Canonical loop "i" /// for (int j = 0; j < 9; ++j) // Canonical loop "j" /// body(i, j); /// \endcode /// /// After collapsing with Loops={i,j}, the loop is changed to /// \code /// for (int ij = 0; ij < 63; ++ij) { /// int i = ij / 9; /// int j = ij % 9; /// body(i, j); /// } /// \endcode /// /// In the current implementation, the following limitations apply: /// /// * All input loops have an induction variable of the same type. /// /// * The collapsed loop will have the same trip count integer type as the /// input loops. Therefore it is possible that the collapsed loop cannot /// represent all iterations of the input loops. For instance, assuming a /// 32 bit integer type, and two input loops both iterating 2^16 times, the /// theoretical trip count of the collapsed loop would be 2^32 iteration, /// which cannot be represented in an 32-bit integer. Behavior is undefined /// in this case. 
/// /// * The trip counts of every input loop must be available at \p ComputeIP. /// Non-rectangular loops are not yet supported. /// /// * At each nest level, code between a surrounding loop and its nested loop /// is hoisted into the loop body, and such code will be executed more /// often than before collapsing (or not at all if any inner loop iteration /// has a trip count of 0). This is permitted by the OpenMP specification. /// /// \param DL Debug location for instructions added for collapsing, /// such as instructions to compute/derive the input loop's /// induction variables. /// \param Loops Loops in the loop nest to collapse. Loops are specified /// from outermost-to-innermost and every control flow of a /// loop's body must pass through its directly nested loop. /// \param ComputeIP Where additional instructions that compute the collapsed /// trip count are inserted. If not set, defaults to before the generated /// loop. /// /// \returns The CanonicalLoopInfo object representing the collapsed loop. CanonicalLoopInfo *collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, InsertPointTy ComputeIP); /// Modifies the canonical loop to be a statically-scheduled workshare loop. /// /// This takes a \p LoopInfo representing a canonical loop, such as the one /// created by \p createCanonicalLoop and emits additional instructions to /// turn it into a workshare loop. In particular, it calls to an OpenMP /// runtime function in the preheader to obtain the loop bounds to be used in /// the current thread, updates the relevant instructions in the canonical /// loop and calls to an OpenMP runtime finalization function after the loop. /// /// TODO: Workshare loops with static scheduling may contain up to two loops /// that fulfill the requirements of an OpenMP canonical loop. One for /// iterating over all iterations of a chunk and another one for iterating /// over all chunks that are executed on the same thread.
Returning /// CanonicalLoopInfo objects representing them may eventually be useful for /// the apply clause planned in OpenMP 6.0, but currently whether these are /// canonical loops is irrelevant. /// /// \param DL Debug location for instructions added for the /// workshare-loop construct itself. /// \param CLI A descriptor of the canonical loop to workshare. /// \param AllocaIP An insertion point for Alloca instructions usable in the /// preheader of the loop. /// \param NeedsBarrier Indicates whether a barrier must be inserted after /// the loop. /// \param Chunk The size of loop chunk considered as a unit when /// scheduling. If \p nullptr, defaults to 1. /// /// \returns Point where to insert code after the workshare construct. InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, bool NeedsBarrier, Value *Chunk = nullptr); /// Modifies the canonical loop to be a dynamically-scheduled workshare loop. /// /// This takes a \p LoopInfo representing a canonical loop, such as the one /// created by \p createCanonicalLoop and emits additional instructions to /// turn it into a workshare loop. In particular, it calls to an OpenMP /// runtime function in the preheader to obtain, and then in each iteration /// to update the loop counter. /// /// \param DL Debug location for instructions added for the /// workshare-loop construct itself. /// \param CLI A descriptor of the canonical loop to workshare. /// \param AllocaIP An insertion point for Alloca instructions usable in the /// preheader of the loop. /// \param SchedType Type of scheduling to be passed to the init function. /// \param NeedsBarrier Indicates whether a barrier must be inserted after /// the loop. /// \param Chunk The size of loop chunk considered as a unit when /// scheduling. If \p nullptr, defaults to 1. /// /// \returns Point where to insert code after the workshare construct.
InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, omp::OMPScheduleType SchedType, bool NeedsBarrier, Value *Chunk = nullptr); /// Modifies the canonical loop to be a workshare loop. /// /// This takes a \p LoopInfo representing a canonical loop, such as the one /// created by \p createCanonicalLoop and emits additional instructions to /// turn it into a workshare loop. In particular, it calls to an OpenMP /// runtime function in the preheader to obtain the loop bounds to be used in /// the current thread, updates the relevant instructions in the canonical /// loop and calls to an OpenMP runtime finalization function after the loop. /// /// \param DL Debug location for instructions added for the /// workshare-loop construct itself. /// \param CLI A descriptor of the canonical loop to workshare. /// \param AllocaIP An insertion point for Alloca instructions usable in the /// preheader of the loop. /// \param NeedsBarrier Indicates whether a barrier must be inserted after /// the loop. /// /// \returns Point where to insert code after the workshare construct. InsertPointTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, bool NeedsBarrier); /// Tile a loop nest. /// /// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in /// \p Loops must be perfectly nested, from outermost to innermost loop /// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value /// of every loop and every tile sizes must be usable in the outermost /// loop's preheader. This implies that the loop nest is rectangular.
/// /// Example: /// \code /// for (int i = 0; i < 15; ++i) // Canonical loop "i" /// for (int j = 0; j < 14; ++j) // Canonical loop "j" /// body(i, j); /// \endcode /// /// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to /// \code /// for (int i1 = 0; i1 < 3; ++i1) /// for (int j1 = 0; j1 < 2; ++j1) /// for (int i2 = 0; i2 < 5; ++i2) /// for (int j2 = 0; j2 < 7; ++j2) /// body(i1*5+i2, j1*7+j2); /// \endcode /// /// The returned vector are the loops {i1,j1,i2,j2}. The loops i1 and j1 are /// referred to as the floor, and the loops i2 and j2 are the tiles. Tiling also /// handles non-constant trip counts, non-constant tile sizes and trip counts /// that are not multiples of the tile size. In the latter case the tile loop /// of the last floor-loop iteration will have fewer iterations than specified /// as its tile size. /// /// /// @param DL Debug location for instructions added by tiling, for /// instance the floor- and tile trip count computation. /// @param Loops Loops to tile. The CanonicalLoopInfo objects are /// invalidated by this method, i.e. should not be used after /// tiling. /// @param TileSizes For each loop in \p Loops, the tile size for that /// dimension. /// /// \returns A list of generated loops. Contains twice as many loops as the /// input loop nest; the first half are the floor loops and the /// second half are the tile loops. std::vector<CanonicalLoopInfo *> tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, ArrayRef<Value *> TileSizes); /// Fully unroll a loop. /// /// Instead of unrolling the loop immediately (and duplicating its body /// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop /// metadata. /// /// \param DL Debug location for instructions added by unrolling. /// \param Loop The loop to unroll. The loop will be invalidated. void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop); /// Fully or partially unroll a loop.
How the loop is unrolled is determined /// using LLVM's LoopUnrollPass. /// /// \param DL Debug location for instructions added by unrolling. /// \param Loop The loop to unroll. The loop will be invalidated. void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop); /// Partially unroll a loop. /// /// The CanonicalLoopInfo of the unrolled loop for use with chained /// loop-associated directive can be requested using \p UnrolledCLI. Not /// needing the CanonicalLoopInfo allows more efficient code generation by /// deferring the actual unrolling to the LoopUnrollPass using loop metadata. /// A loop-associated directive applied to the unrolled loop needs to know the /// new trip count which means that if using a heuristically determined unroll /// factor (\p Factor == 0), that factor must be computed immediately. We are /// using the same logic as the LoopUnrollPass to derive the unroll factor, /// but which assumes that some canonicalization has taken place (e.g. /// Mem2Reg, LICM, GVN, Inlining, etc.). That is, the heuristic will perform /// better when the unrolled loop's CanonicalLoopInfo is not needed. /// /// \param DL Debug location for instructions added by unrolling. /// \param Loop The loop to unroll. The loop will be invalidated. /// \param Factor The factor to unroll the loop by. A factor of 0 /// indicates that a heuristic should be used to determine /// the unroll-factor. /// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the /// partially unrolled loop. Otherwise, uses loop metadata /// to defer unrolling to the LoopUnrollPass. void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor, CanonicalLoopInfo **UnrolledCLI); /// Add metadata to simd-ize a loop. /// /// \param DL Debug location for instructions added by unrolling. /// \param Loop The loop to simd-ize.
void applySimd(DebugLoc DL, CanonicalLoopInfo *Loop); /// Generator for '#omp flush' /// /// \param Loc The location where the flush directive was encountered void createFlush(const LocationDescription &Loc); /// Generator for '#omp taskwait' /// /// \param Loc The location where the taskwait directive was encountered. void createTaskwait(const LocationDescription &Loc); /// Generator for '#omp taskyield' /// /// \param Loc The location where the taskyield directive was encountered. void createTaskyield(const LocationDescription &Loc); /// Functions used to generate reductions. Such functions take two Values /// representing LHS and RHS of the reduction, respectively, and a reference /// to the value that is updated to refer to the reduction result. using ReductionGenTy = function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>; /// Functions used to generate atomic reductions. Such functions take two /// Values representing pointers to LHS and RHS of the reduction, as well as /// the element type of these pointers. They are expected to atomically /// update the LHS to the reduced value. using AtomicReductionGenTy = function_ref<InsertPointTy(InsertPointTy, Type *, Value *, Value *)>; /// Information about an OpenMP reduction. struct ReductionInfo { ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable, ReductionGenTy ReductionGen, AtomicReductionGenTy AtomicReductionGen) : ElementType(ElementType), Variable(Variable), PrivateVariable(PrivateVariable), ReductionGen(ReductionGen), AtomicReductionGen(AtomicReductionGen) { assert(cast<PointerType>(Variable->getType()) ->isOpaqueOrPointeeTypeMatches(ElementType) && "Invalid elem type"); } /// Reduction element type, must match pointee type of variable. Type *ElementType; /// Reduction variable of pointer type. Value *Variable; /// Thread-private partial reduction variable. Value *PrivateVariable; /// Callback for generating the reduction body. 
The IR produced by this will /// be used to combine two values in a thread-safe context, e.g., under /// lock or within the same thread, and therefore need not be atomic. ReductionGenTy ReductionGen; /// Callback for generating the atomic reduction body, may be null. The IR /// produced by this will be used to atomically combine two values during /// reduction. If null, the implementation will use the non-atomic version /// along with the appropriate synchronization mechanisms. AtomicReductionGenTy AtomicReductionGen; }; // TODO: provide atomic and non-atomic reduction generators for reduction // operators defined by the OpenMP specification. /// Generator for '#omp reduction'. /// /// Emits the IR instructing the runtime to perform the specific kind of /// reductions. Expects reduction variables to have been privatized and /// initialized to reduction-neutral values separately. Emits the calls to /// runtime functions as well as the reduction function and the basic blocks /// performing the reduction atomically and non-atomically. /// /// The code emitted for the following: /// /// \code /// type var_1; /// type var_2; /// #pragma omp <directive> reduction(reduction-op:var_1,var_2) /// /* body */; /// \endcode /// /// corresponds to the following sketch. /// /// \code /// void _outlined_par() { /// // N is the number of different reductions. /// void *red_array[] = {privatized_var_1, privatized_var_2, ...}; /// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array, /// _omp_reduction_func, /// _gomp_critical_user.reduction.var)) { /// case 1: { /// var_1 = var_1 <reduction-op> privatized_var_1; /// var_2 = var_2 <reduction-op> privatized_var_2; /// // ... /// __kmpc_end_reduce(...); /// break; /// } /// case 2: { /// _Atomic<ReductionOp>(var_1, privatized_var_1); /// _Atomic<ReductionOp>(var_2, privatized_var_2); /// // ... 
/// break; /// } /// default: break; /// } /// } /// /// void _omp_reduction_func(void **lhs, void **rhs) { /// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0]; /// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1]; /// // ... /// } /// \endcode /// /// \param Loc The location where the reduction was /// encountered. Must be within the associate /// directive and after the last local access to the /// reduction variables. /// \param AllocaIP An insertion point suitable for allocas usable /// in reductions. /// \param ReductionInfos A list of info on each reduction variable. /// \param IsNoWait A flag set if the reduction is marked as nowait. InsertPointTy createReductions(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef<ReductionInfo> ReductionInfos, bool IsNoWait = false); ///} /// Return the insertion point used by the underlying IRBuilder. InsertPointTy getInsertionPoint() { return Builder.saveIP(); } /// Update the internal location to \p Loc. bool updateToLocation(const LocationDescription &Loc) { Builder.restoreIP(Loc.IP); Builder.SetCurrentDebugLocation(Loc.DL); return Loc.IP.getBlock() != nullptr; } /// Return the function declaration for the runtime function with \p FnID. FunctionCallee getOrCreateRuntimeFunction(Module &M, omp::RuntimeFunction FnID); Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID); /// Return the (LLVM-IR) string describing the source location \p LocStr. Constant *getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize); /// Return the (LLVM-IR) string describing the default source location. Constant *getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize); /// Return the (LLVM-IR) string describing the source location identified by /// the arguments. Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName, unsigned Line, unsigned Column, uint32_t &SrcLocStrSize); /// Return the (LLVM-IR) string describing the DebugLoc \p DL. 
Use \p F as /// fallback if \p DL does not specify the function name. Constant *getOrCreateSrcLocStr(DebugLoc DL, uint32_t &SrcLocStrSize, Function *F = nullptr); /// Return the (LLVM-IR) string describing the source location \p Loc. Constant *getOrCreateSrcLocStr(const LocationDescription &Loc, uint32_t &SrcLocStrSize); /// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags. /// TODO: Create an enum class for the Reserve2Flags Constant *getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize, omp::IdentFlag Flags = omp::IdentFlag(0), unsigned Reserve2Flags = 0); /// Create a hidden global flag \p Name in the module with initial value \p /// Value. GlobalValue *createGlobalFlag(unsigned Value, StringRef Name); /// Generate control flow and cleanup for cancellation. /// /// \param CancelFlag Flag indicating if the cancellation is performed. /// \param CanceledDirective The kind of directive that is canceled. /// \param ExitCB Extra code to be generated in the exit block. void emitCancelationCheckImpl(Value *CancelFlag, omp::Directive CanceledDirective, FinalizeCallbackTy ExitCB = {}); /// Generate a barrier runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. /// \param DK The directive which caused the barrier /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier. /// \param CheckCancelFlag Flag to indicate a cancel barrier return value /// should be checked and acted upon. /// /// \returns The insertion point after the barrier. InsertPointTy emitBarrierImpl(const LocationDescription &Loc, omp::Directive DK, bool ForceSimpleCall, bool CheckCancelFlag); /// Generate a flush runtime call. /// /// \param Loc The location at which the request originated and is fulfilled.
void emitFlush(const LocationDescription &Loc); /// The finalization stack made up of finalize callbacks currently in-flight, /// wrapped into FinalizationInfo objects that reference also the finalization /// target block and the kind of cancellable directive. SmallVector<FinalizationInfo, 8> FinalizationStack; /// Return true if the last entry in the finalization stack is of kind \p DK /// and cancellable. bool isLastFinalizationInfoCancellable(omp::Directive DK) { return !FinalizationStack.empty() && FinalizationStack.back().IsCancellable && FinalizationStack.back().DK == DK; } /// Generate a taskwait runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. void emitTaskwaitImpl(const LocationDescription &Loc); /// Generate a taskyield runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. void emitTaskyieldImpl(const LocationDescription &Loc); /// Return the current thread ID. /// /// \param Ident The ident (ident_t*) describing the query origin. Value *getOrCreateThreadID(Value *Ident); /// The underlying LLVM-IR module Module &M; /// The LLVM-IR Builder used to create IR. IRBuilder<> Builder; /// Map to remember source location strings StringMap<Constant *> SrcLocStrMap; /// Map to remember existing ident_t*. DenseMap<std::pair<Constant *, uint64_t>, Constant *> IdentMap; /// Helper that contains information about regions we need to outline /// during finalization. struct OutlineInfo { using PostOutlineCBTy = std::function<void(Function &)>; PostOutlineCBTy PostOutlineCB; BasicBlock *EntryBB, *ExitBB; SmallVector<Value *, 2> ExcludeArgsFromAggregate; /// Collect all blocks in between EntryBB and ExitBB in both the given /// vector and set. void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet, SmallVectorImpl<BasicBlock *> &BlockVector); /// Return the function that contains the region to be outlined. 
Function *getFunction() const { return EntryBB->getParent(); } }; /// Collection of regions that need to be outlined during finalization. SmallVector<OutlineInfo, 16> OutlineInfos; /// Collection of owned canonical loop objects that eventually need to be /// free'd. std::forward_list<CanonicalLoopInfo> LoopInfos; /// Add a new region that will be outlined later. void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); } /// An ordered map of auto-generated variables to their unique names. /// It stores variables with the following names: 1) ".gomp_critical_user_" + /// <critical_section_name> + ".var" for "omp critical" directives; 2) /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate /// variables. StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars; /// Create the global variable holding the offload mappings information. GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings, std::string VarName); /// Create the global variable holding the offload names information. GlobalVariable * createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names, std::string VarName); struct MapperAllocas { AllocaInst *ArgsBase = nullptr; AllocaInst *Args = nullptr; AllocaInst *ArgSizes = nullptr; }; /// Create the allocas instruction used in call to mapper functions. void createMapperAllocas(const LocationDescription &Loc, InsertPointTy AllocaIP, unsigned NumOperands, struct MapperAllocas &MapperAllocas); /// Create the call for the target mapper function. /// \param Loc The source location description. /// \param MapperFunc Function to be called. /// \param SrcLocInfo Source location information global. /// \param MaptypesArg The argument types. /// \param MapnamesArg The argument names. /// \param MapperAllocas The AllocaInst used for the call. /// \param DeviceID Device ID for the call. /// \param NumOperands Number of operands in the call. 
void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc, Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg, struct MapperAllocas &MapperAllocas, int64_t DeviceID, unsigned NumOperands); public: /// Generator for __kmpc_copyprivate /// /// \param Loc The source location description. /// \param BufSize Number of elements in the buffer. /// \param CpyBuf List of pointers to data to be copied. /// \param CpyFn function to call for copying data. /// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise. /// /// \return The insertion position *after* the CopyPrivate call. InsertPointTy createCopyPrivate(const LocationDescription &Loc, llvm::Value *BufSize, llvm::Value *CpyBuf, llvm::Value *CpyFn, llvm::Value *DidIt); /// Generator for '#omp single' /// /// \param Loc The source location description. /// \param BodyGenCB Callback that will generate the region code. /// \param FiniCB Callback to finalize variable copies. /// \param DidIt Local variable used as a flag to indicate 'single' thread /// /// \returns The insertion position *after* the single call. InsertPointTy createSingle(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, llvm::Value *DidIt); /// Generator for '#omp master' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region code. /// \param FiniCB Callback to finalize variable copies. /// /// \returns The insertion position *after* the master. InsertPointTy createMaster(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB); /// Generator for '#omp masked' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region code. /// \param FiniCB Callback to finalize variable copies. /// /// \returns The insertion position *after* the masked.
InsertPointTy createMasked(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, Value *Filter); /// Generator for '#omp critical' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region body code. /// \param FiniCB Callback to finalize variable copies. /// \param CriticalName name of the lock used by the critical directive /// \param HintInst Hint Instruction for hint clause associated with critical /// /// \returns The insertion position *after* the critical. InsertPointTy createCritical(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst); /// Generator for '#omp ordered depend (source | sink)' /// /// \param Loc The insert and source location description. /// \param AllocaIP The insertion point to be used for alloca instructions. /// \param NumLoops The number of loops in depend clause. /// \param StoreValues The value will be stored in vector address. /// \param Name The name of alloca instruction. /// \param IsDependSource If true, depend source; otherwise, depend sink. /// /// \return The insertion position *after* the ordered. InsertPointTy createOrderedDepend(const LocationDescription &Loc, InsertPointTy AllocaIP, unsigned NumLoops, ArrayRef<llvm::Value *> StoreValues, const Twine &Name, bool IsDependSource); /// Generator for '#omp ordered [threads | simd]' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region code. /// \param FiniCB Callback to finalize variable copies. /// \param IsThreads If true, with threads clause or without clause; /// otherwise, with simd clause; /// /// \returns The insertion position *after* the ordered. 
InsertPointTy createOrderedThreadsSimd(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool IsThreads); /// Generator for '#omp sections' /// /// \param Loc The insert and source location description. /// \param AllocaIP The insertion points to be used for alloca instructions. /// \param SectionCBs Callbacks that will generate body of each section. /// \param PrivCB Callback to copy a given variable (think copy constructor). /// \param FiniCB Callback to finalize variable copies. /// \param IsCancellable Flag to indicate a cancellable parallel region. /// \param IsNowait If true, barrier - to ensure all sections are executed /// before moving forward will not be generated. /// \returns The insertion position *after* the sections. InsertPointTy createSections(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait); /// Generator for '#omp section' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region body code. /// \param FiniCB Callback to finalize variable copies. /// \returns The insertion position *after* the section. InsertPointTy createSection(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB); /// Generate conditional branch and relevant BasicBlocks through which private /// threads copy the 'copyin' variables from Master copy to threadprivate /// copies. /// /// \param IP insertion block for copyin conditional /// \param MasterVarPtr a pointer to the master variable /// \param PrivateVarPtr a pointer to the threadprivate variable /// \param IntPtrTy Pointer size type /// \param BranchtoEnd Create a branch between the copyin.not.master blocks // and copy.in.end block /// /// \returns The insertion point where copying operation to be emitted. 
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr, llvm::IntegerType *IntPtrTy, bool BranchtoEnd = true); /// Create a runtime call for kmpc_Alloc /// /// \param Loc The insert and source location description. /// \param Size Size of allocated memory space /// \param Allocator Allocator information instruction /// \param Name Name of call Instruction for OMP_alloc /// /// \returns CallInst to the OMP_Alloc call CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size, Value *Allocator, std::string Name = ""); /// Create a runtime call for kmpc_free /// /// \param Loc The insert and source location description. /// \param Addr Address of memory space to be freed /// \param Allocator Allocator information instruction /// \param Name Name of call Instruction for OMP_Free /// /// \returns CallInst to the OMP_Free call CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr, Value *Allocator, std::string Name = ""); /// Create a runtime call for kmpc_threadprivate_cached /// /// \param Loc The insert and source location description. /// \param Pointer pointer to data to be cached /// \param Size size of data to be cached /// \param Name Name of call Instruction for callinst /// /// \returns CallInst to the thread private cache call. CallInst *createCachedThreadPrivate(const LocationDescription &Loc, llvm::Value *Pointer, llvm::ConstantInt *Size, const llvm::Twine &Name = Twine("")); /// Create a runtime call for __tgt_interop_init /// /// \param Loc The insert and source location description. 
/// \param InteropVar variable to be allocated /// \param InteropType type of interop operation /// \param Device device to which offloading will occur /// \param NumDependences number of dependence variables /// \param DependenceAddress pointer to dependence variables /// \param HaveNowaitClause does nowait clause exist /// /// \returns CallInst to the __tgt_interop_init call CallInst *createOMPInteropInit(const LocationDescription &Loc, Value *InteropVar, omp::OMPInteropType InteropType, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause); /// Create a runtime call for __tgt_interop_destroy /// /// \param Loc The insert and source location description. /// \param InteropVar variable to be allocated /// \param Device device to which offloading will occur /// \param NumDependences number of dependence variables /// \param DependenceAddress pointer to dependence variables /// \param HaveNowaitClause does nowait clause exist /// /// \returns CallInst to the __tgt_interop_destroy call CallInst *createOMPInteropDestroy(const LocationDescription &Loc, Value *InteropVar, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause); /// Create a runtime call for __tgt_interop_use /// /// \param Loc The insert and source location description.
/// \param InteropVar variable to be allocated /// \param Device devide to which offloading will occur /// \param NumDependences number of dependence variables /// \param DependenceAddress pointer to dependence variables /// \param HaveNowaitClause does nowait clause exist /// /// \returns CallInst to the __tgt_interop_use call CallInst *createOMPInteropUse(const LocationDescription &Loc, Value *InteropVar, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause); /// The `omp target` interface /// /// For more information about the usage of this interface, /// \see openmp/libomptarget/deviceRTLs/common/include/target.h /// ///{ /// Create a runtime call for kmpc_target_init /// /// \param Loc The insert and source location description. /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not. /// \param RequiresFullRuntime Indicate if a full device runtime is necessary. InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD, bool RequiresFullRuntime); /// Create a runtime call for kmpc_target_deinit /// /// \param Loc The insert and source location description. /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not. /// \param RequiresFullRuntime Indicate if a full device runtime is necessary. void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD, bool RequiresFullRuntime); ///} /// Declarations for LLVM-IR types (simple, array, function and structure) are /// generated below. Their names are defined and used in OpenMPKinds.def. Here /// we provide the declarations, the initializeTypes function will provide the /// values. /// ///{ #define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr; #define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \ ArrayType *VarName##Ty = nullptr; \ PointerType *VarName##PtrTy = nullptr; #define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) 
\ FunctionType *VarName = nullptr; \ PointerType *VarName##Ptr = nullptr; #define OMP_STRUCT_TYPE(VarName, StrName, ...) \ StructType *VarName = nullptr; \ PointerType *VarName##Ptr = nullptr; #include "llvm/Frontend/OpenMP/OMPKinds.def" ///} private: /// Create all simple and struct types exposed by the runtime and remember /// the llvm::PointerTypes of them for easy access later. void initializeTypes(Module &M); /// Common interface for generating entry calls for OMP Directives. /// if the directive has a region/body, It will set the insertion /// point to the body /// /// \param OMPD Directive to generate entry blocks for /// \param EntryCall Call to the entry OMP Runtime Function /// \param ExitBB block where the region ends. /// \param Conditional indicate if the entry call result will be used /// to evaluate a conditional of whether a thread will execute /// body code or not. /// /// \return The insertion position in exit block InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional = false); /// Common interface to finalize the region /// /// \param OMPD Directive to generate exiting code for /// \param FinIP Insertion point for emitting Finalization code and exit call /// \param ExitCall Call to the ending OMP Runtime Function /// \param HasFinalize indicate if the directive will require finalization /// and has a finalization callback in the stack that /// should be called. /// /// \return The insertion position in exit block InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall, bool HasFinalize = true); /// Common Interface to generate OMP inlined regions /// /// \param OMPD Directive to generate inlined region for /// \param EntryCall Call to the entry OMP Runtime Function /// \param ExitCall Call to the ending OMP Runtime Function /// \param BodyGenCB Body code generation callback. /// \param FiniCB Finalization Callback. 
Will be called when finalizing region /// \param Conditional indicate if the entry call result will be used /// to evaluate a conditional of whether a thread will execute /// body code or not. /// \param HasFinalize indicate if the directive will require finalization /// and has a finalization callback in the stack that /// should be called. /// \param IsCancellable if HasFinalize is set to true, indicate if the /// the directive should be cancellable. /// \return The insertion point after the region InsertPointTy EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall, Instruction *ExitCall, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional = false, bool HasFinalize = true, bool IsCancellable = false); /// Get the platform-specific name separator. /// \param Parts different parts of the final name that needs separation /// \param FirstSeparator First separator used between the initial two /// parts of the name. /// \param Separator separator used between all of the rest consecutive /// parts of the name static std::string getNameWithSeparators(ArrayRef<StringRef> Parts, StringRef FirstSeparator, StringRef Separator); /// Gets (if variable with the given name already exist) or creates /// internal global variable with the specified Name. The created variable has /// linkage CommonLinkage by default and is initialized by null value. /// \param Ty Type of the global variable. If it is exist already the type /// must be the same. /// \param Name Name of the variable. Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name, unsigned AddressSpace = 0); /// Returns corresponding lock object for the specified critical region /// name. If the lock object does not exist it is created, otherwise the /// reference to the existing copy is returned. /// \param CriticalName Name of the critical region. 
/// Value *getOMPCriticalRegionLock(StringRef CriticalName); /// Callback type for Atomic Expression update /// ex: /// \code{.cpp} /// unsigned x = 0; /// #pragma omp atomic update /// x = Expr(x_old); //Expr() is any legal operation /// \endcode /// /// \param XOld the value of the atomic memory address to use for update /// \param IRB reference to the IRBuilder to use /// /// \returns Value to update X to. using AtomicUpdateCallbackTy = const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>; private: enum AtomicKind { Read, Write, Update, Capture }; /// Determine whether to emit flush or not /// /// \param Loc The insert and source location description. /// \param AO The required atomic ordering /// \param AK The OpenMP atomic operation kind used. /// /// \returns wether a flush was emitted or not bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc, AtomicOrdering AO, AtomicKind AK); /// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X) /// Only Scalar data types. /// /// \param AllocIP Instruction to create AllocaInst before. /// \param X The target atomic pointer to be updated /// \param XElemTy The element type of the atomic pointer. /// \param Expr The value to update X with. /// \param AO Atomic ordering of the generated atomic /// instructions. /// \param RMWOp The binary operation used for update. If /// operation is not supported by atomicRMW, /// or belong to {FADD, FSUB, BAD_BINOP}. /// Then a `cmpExch` based atomic will be generated. /// \param UpdateOp Code generator for complex expressions that cannot be /// expressed through atomicrmw instruction. /// \param VolatileX true if \a X volatile? /// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the /// update expression, false otherwise. /// (e.g. 
true for X = X BinOp Expr) /// /// \returns A pair of the old value of X before the update, and the value /// used for the update. std::pair<Value *, Value *> emitAtomicUpdate(Instruction *AllocIP, Value *X, Type *XElemTy, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool VolatileX, bool IsXBinopExpr); /// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 . /// /// \Return The instruction Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2, AtomicRMWInst::BinOp RMWOp); public: /// a struct to pack relevant information while generating atomic Ops struct AtomicOpValue { Value *Var = nullptr; Type *ElemTy = nullptr; bool IsSigned = false; bool IsVolatile = false; }; /// Emit atomic Read for : V = X --- Only Scalar data types. /// /// \param Loc The insert and source location description. /// \param X The target pointer to be atomically read /// \param V Memory address where to store atomically read /// value /// \param AO Atomic ordering of the generated atomic /// instructions. /// /// \return Insertion point after generated atomic read IR. InsertPointTy createAtomicRead(const LocationDescription &Loc, AtomicOpValue &X, AtomicOpValue &V, AtomicOrdering AO); /// Emit atomic write for : X = Expr --- Only Scalar data types. /// /// \param Loc The insert and source location description. /// \param X The target pointer to be atomically written to /// \param Expr The value to store. /// \param AO Atomic ordering of the generated atomic /// instructions. /// /// \return Insertion point after generated atomic Write IR. InsertPointTy createAtomicWrite(const LocationDescription &Loc, AtomicOpValue &X, Value *Expr, AtomicOrdering AO); /// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X) /// Only Scalar data types. /// /// \param Loc The insert and source location description. 
/// \param AllocIP Instruction to create AllocaInst before. /// \param X The target atomic pointer to be updated /// \param Expr The value to update X with. /// \param AO Atomic ordering of the generated atomic instructions. /// \param RMWOp The binary operation used for update. If operation /// is not supported by atomicRMW, or belong to /// {FADD, FSUB, BAD_BINOP}. Then a `cmpExch` based /// atomic will be generated. /// \param UpdateOp Code generator for complex expressions that cannot be /// expressed through atomicrmw instruction. /// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the /// update expression, false otherwise. /// (e.g. true for X = X BinOp Expr) /// /// \return Insertion point after generated atomic update IR. InsertPointTy createAtomicUpdate(const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr); /// Emit atomic update for constructs: --- Only Scalar data types /// V = X; X = X BinOp Expr , /// X = X BinOp Expr; V = X, /// V = X; X = Expr BinOp X, /// X = Expr BinOp X; V = X, /// V = X; X = UpdateOp(X), /// X = UpdateOp(X); V = X, /// /// \param Loc The insert and source location description. /// \param AllocIP Instruction to create AllocaInst before. /// \param X The target atomic pointer to be updated /// \param V Memory address where to store captured value /// \param Expr The value to update X with. /// \param AO Atomic ordering of the generated atomic instructions /// \param RMWOp The binary operation used for update. If /// operation is not supported by atomicRMW, or belong to /// {FADD, FSUB, BAD_BINOP}. Then a cmpExch based /// atomic will be generated. /// \param UpdateOp Code generator for complex expressions that cannot be /// expressed through atomicrmw instruction. 
/// \param UpdateExpr true if X is an in place update of the form /// X = X BinOp Expr or X = Expr BinOp X /// \param IsXBinopExpr true if X is Left H.S. in Right H.S. part of the /// update expression, false otherwise. /// (e.g. true for X = X BinOp Expr) /// \param IsPostfixUpdate true if original value of 'x' must be stored in /// 'v', not an updated one. /// /// \return Insertion point after generated atomic capture IR. InsertPointTy createAtomicCapture(const LocationDescription &Loc, Instruction *AllocIP, AtomicOpValue &X, AtomicOpValue &V, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr); /// Create the control flow structure of a canonical OpenMP loop. /// /// The emitted loop will be disconnected, i.e. no edge to the loop's /// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's /// IRBuilder location is not preserved. /// /// \param DL DebugLoc used for the instructions in the skeleton. /// \param TripCount Value to be used for the trip count. /// \param F Function in which to insert the BasicBlocks. /// \param PreInsertBefore Where to insert BBs that execute before the body, /// typically the body itself. /// \param PostInsertBefore Where to insert BBs that execute after the body. /// \param Name Base name used to derive BB /// and instruction names. /// /// \returns The CanonicalLoopInfo that represents the emitted loop. CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore, BasicBlock *PostInsertBefore, const Twine &Name = {}); }; /// Class to represented the control flow structure of an OpenMP canonical loop. /// /// The control-flow structure is standardized for easy consumption by /// directives associated with loops. For instance, the worksharing-loop /// construct may change this control flow such that each loop iteration is /// executed on only one thread. 
The constraints of a canonical loop in brief /// are: /// /// * The number of loop iterations must have been computed before entering the /// loop. /// /// * Has an (unsigned) logical induction variable that starts at zero and /// increments by one. /// /// * The loop's CFG itself has no side-effects. The OpenMP specification /// itself allows side-effects, but the order in which they happen, including /// how often or whether at all, is unspecified. We expect that the frontend /// will emit those side-effect instructions somewhere (e.g. before the loop) /// such that the CanonicalLoopInfo itself can be side-effect free. /// /// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated /// execution of a loop body that satifies these constraints. It does NOT /// represent arbitrary SESE regions that happen to contain a loop. Do not use /// CanonicalLoopInfo for such purposes. /// /// The control flow can be described as follows: /// /// Preheader /// | /// /-> Header /// | | /// | Cond---\ /// | | | /// | Body | /// | | | | /// | <...> | /// | | | | /// \--Latch | /// | /// Exit /// | /// After /// /// The loop is thought to start at PreheaderIP (at the Preheader's terminator, /// including) and end at AfterIP (at the After's first instruction, excluding). /// That is, instructions in the Preheader and After blocks (except the /// Preheader's terminator) are out of CanonicalLoopInfo's control and may have /// side-effects. Typically, the Preheader is used to compute the loop's trip /// count. The instructions from BodyIP (at the Body block's first instruction, /// excluding) until the Latch are also considered outside CanonicalLoopInfo's /// control and thus can have side-effects. The body block is the single entry /// point into the loop body, which may contain arbitrary control flow as long /// as all control paths eventually branch to the Latch block. 
///
/// TODO: Consider adding another standardized BasicBlock between Body CFG and
/// Latch to guarantee that there is only a single edge to the latch. It would
/// make loop transformations easier by not needing to consider multiple
/// predecessors of the latch (See redirectAllPredecessorsTo) and would give us
/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
/// executes after each body iteration.
///
/// There must be no loop-carried dependencies through llvm::Values. This is
/// equivalent to the Latch having no PHINode and the Header's only PHINode
/// being for the induction variable.
///
/// All code in Header, Cond, Latch and Exit (plus the terminator of the
/// Preheader) are CanonicalLoopInfo's responsibility and their build-up checked
/// by assertOK(). They are expected to not be modified unless explicitly
/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
/// anymore as its underlying control flow may not exist anymore.
/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
/// may also return a new CanonicalLoopInfo that can be passed to other
/// loop-associated construct implementing methods. These loop-transforming
/// methods may either create a new CanonicalLoopInfo usually using
/// createLoopSkeleton and invalidate the input CanonicalLoopInfo, or reuse and
/// modify one of the input CanonicalLoopInfo and return it as representing the
/// modified loop. What is done is an implementation detail of the
/// transformation-implementing method and callers should always assume that the
/// CanonicalLoopInfo passed to it is invalidated and a new object is returned.
/// Returned CanonicalLoopInfos have the same structure and guarantees as the
/// one created by createCanonicalLoop, such that transforming methods do not
/// have to special case where the CanonicalLoopInfo originated from.
///
/// Generally, methods consuming CanonicalLoopInfo do not need an
/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
/// any InsertPoint in the Preheader, After or Block can still be used after
/// calling such a method.
///
/// TODO: Provide mechanisms for exception handling and cancellation points.
///
/// Defined outside OpenMPIRBuilder because nested classes cannot be
/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
class CanonicalLoopInfo {
  friend class OpenMPIRBuilder;

private:
  // The standardized control blocks of the loop. All are null until the
  // object is initialized (see isValid()).
  BasicBlock *Header = nullptr;
  BasicBlock *Cond = nullptr;
  BasicBlock *Latch = nullptr;
  BasicBlock *Exit = nullptr;

  /// Add the control blocks of this loop to \p BBs.
  ///
  /// This does not include any block from the body, including the one returned
  /// by getBody().
  ///
  /// FIXME: This currently includes the Preheader and After blocks even though
  /// their content is (mostly) not under CanonicalLoopInfo's control.
  /// Re-evaluate whether this makes sense.
  void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);

public:
  /// Returns whether this object currently represents the IR of a loop. If
  /// returning false, it may have been consumed by a loop transformation or
  /// not been initialized. Do not use in this case.
  bool isValid() const { return Header; }

  /// The preheader ensures that there is only a single edge entering the loop.
  /// Code that must be executed before any loop iteration can be emitted here,
  /// such as computing the loop trip count and begin lifetime markers. Code in
  /// the preheader is not considered part of the canonical loop.
  BasicBlock *getPreheader() const;

  /// The header is the entry for each iteration. In the canonical control
  /// flow, it only contains the PHINode for the induction variable.
  BasicBlock *getHeader() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Header;
  }

  /// The condition block computes whether there is another loop iteration. If
  /// yes, branches to the body; otherwise to the exit block.
  BasicBlock *getCond() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Cond;
  }

  /// The body block is the single entry for a loop iteration and not
  /// controlled by CanonicalLoopInfo. It can contain arbitrary control flow
  /// but must eventually branch to the \p Latch block.
  BasicBlock *getBody() const {
    assert(isValid() && "Requires a valid canonical loop");
    // By construction the body is the taken (first) successor of the
    // conditional branch that terminates Cond.
    return cast<BranchInst>(Cond->getTerminator())->getSuccessor(0);
  }

  /// Reaching the latch indicates the end of the loop body code. In the
  /// canonical control flow, it only contains the increment of the induction
  /// variable.
  BasicBlock *getLatch() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Latch;
  }

  /// Reaching the exit indicates no more iterations are being executed.
  BasicBlock *getExit() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Exit;
  }

  /// The after block is intended for clean-up code such as lifetime end
  /// markers. It is separate from the exit block to ensure, analogous to the
  /// preheader, it having just a single entry edge and being free from PHI
  /// nodes should there be multiple loop exits (such as from break
  /// statements/cancellations).
  BasicBlock *getAfter() const {
    assert(isValid() && "Requires a valid canonical loop");
    // After is the unique successor of Exit by construction.
    return Exit->getSingleSuccessor();
  }

  /// Returns the llvm::Value containing the number of loop iterations. It
  /// must be valid in the preheader and always interpreted as an unsigned
  /// integer of any bit-width.
  Value *getTripCount() const {
    assert(isValid() && "Requires a valid canonical loop");
    // The trip count is the second operand of the IV-vs-bound comparison
    // that must be the first instruction of Cond.
    Instruction *CmpI = &Cond->front();
    assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
    return CmpI->getOperand(1);
  }

  /// Returns the instruction representing the current logical induction
  /// variable. Always unsigned, always starting at 0 with an increment of one.
  Instruction *getIndVar() const {
    assert(isValid() && "Requires a valid canonical loop");
    Instruction *IndVarPHI = &Header->front();
    assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
    return IndVarPHI;
  }

  /// Return the type of the induction variable (and the trip count).
  Type *getIndVarType() const {
    assert(isValid() && "Requires a valid canonical loop");
    return getIndVar()->getType();
  }

  /// Return the insertion point for user code before the loop.
  OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    BasicBlock *Preheader = getPreheader();
    // Insert just before the preheader's terminator.
    return {Preheader, std::prev(Preheader->end())};
  };

  /// Return the insertion point for user code in the body.
  OpenMPIRBuilder::InsertPointTy getBodyIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    BasicBlock *Body = getBody();
    return {Body, Body->begin()};
  };

  /// Return the insertion point for user code after the loop.
  OpenMPIRBuilder::InsertPointTy getAfterIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    BasicBlock *After = getAfter();
    return {After, After->begin()};
  };

  /// Return the function that contains the loop's blocks.
  Function *getFunction() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Header->getParent();
  }

  /// Consistency self-check.
  void assertOK() const;

  /// Invalidate this loop. That is, the underlying IR does not fulfill the
  /// requirements of an OpenMP canonical loop anymore.
  void invalidate();
};

} // end namespace llvm

#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
StreamTriad_par1.c
#include <stdio.h> #include <stdlib.h> #include <time.h> #include "timer.h" int main(int argc, char *argv[]){ int nsize = 20000000, ntimes=16; double a[nsize]; double b[nsize]; double c[nsize]; struct timespec tstart; // initializing data and arrays double scalar = 3.0, time_sum = 0.0; #pragma omp target teams distribute parallel for simd for (int i=0; i<nsize; i++) { a[i] = 1.0; b[i] = 2.0; } for (int k=0; k<ntimes; k++){ cpu_timer_start(&tstart); // stream triad loop #pragma omp target teams distribute parallel for simd for (int i=0; i<nsize; i++){ c[i] = a[i] + scalar*b[i]; } time_sum += cpu_timer_stop(tstart); } printf("Average runtime for stream triad loop is %lf secs\n", time_sum/ntimes); return(0); }
GB_binop__le_fp32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// It is produced from a Generator/ source; changes belong there, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__le_fp32
// A.*B function (eWiseMult):       GB_AemultB__le_fp32
// A*D function (colscale):         GB_AxD__le_fp32
// D*A function (rowscale):         GB_DxB__le_fp32
// C+=B function (dense accum):     GB_Cdense_accumB__le_fp32
// C+=b function (dense accum):     GB_Cdense_accumb__le_fp32
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__le_fp32
// C=scalar+B                       GB_bind1st__le_fp32
// C=scalar+B'                      GB_bind1st_tran__le_fp32
// C=A+scalar                       GB_bind2nd__le_fp32
// C=A'+scalar                      GB_bind2nd_tran__le_fp32

// C type:   bool
// A type:   float
// B,b type: float
// BinaryOp: cij = (aij <= bij)

// These macros parameterize the shared template files (#include'd below)
// for this specific operator/type combination.

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    float bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x <= y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// (none) here — presumably the generator emits a real name only for the
// operators with a CBLAS equivalent; the macro is never expanded for LE.
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LE || GxB_NO_FP32 || GxB_NO_LE_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Compiled out: the LE operator is not one of the accumulating ops below,
// so no dense ewise3-accum kernel exists for it.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__le_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body supplied by the shared template, specialized via the macros above
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__le_fp32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template body disabled for this operator; the call is a no-op stub
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__le_fp32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template body disabled for this operator; the call is a no-op stub
    #if 0
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__le_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__le_fp32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__le_fp32
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__le_fp32
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__le_fp32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    // element-wise: Cx [p] = (x <= Bx [p]) for all anz entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float bij = Bx [p] ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__le_fp32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float   y = (*((float *) y_input)) ;
    // element-wise: Cx [p] = (Ax [p] <= y) for all anz entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float aij = Ax [p] ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    float aij = Ax [pA] ;               \
    Cx [pC] = (x <= aij) ;              \
}

GrB_Info GB_bind1st_tran__le_fp32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    float aij = Ax [pA] ;               \
    Cx [pC] = (aij <= y) ;              \
}

GrB_Info GB_bind2nd_tran__le_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__ainv_uint32_int16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// It is produced from a Generator/ source; changes belong there, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint32_int16
// op(A') function:  GB_tran__ainv_uint32_int16

// C type:   uint32_t
// A type:   int16_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (additive inverse; negation of the unsigned value wraps
// modulo 2^32, which is well-defined in C)
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx and Ax may be aliased: each iteration reads Ax [p] before writing
// Cx [p], so in-place application is safe.
GrB_Info GB_unop__ainv_uint32_int16
(
    uint32_t *Cx,       // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint32_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body supplied by the shared template, specialized via the macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__eq_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__eq_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_08__eq_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__eq_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_04__eq_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__eq_fp32)
// A*D function (colscale):         GB (_AxD__eq_fp32)
// D*A function (rowscale):         GB (_DxB__eq_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__eq_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__eq_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__eq_fp32)
// C=scalar+B                       GB (_bind1st__eq_fp32)
// C=scalar+B'                      GB (_bind1st_tran__eq_fp32)
// C=A+scalar                       GB (_bind2nd__eq_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__eq_fp32)

// C type:     bool
// A type:     float
// A pattern?  0
// B type:     float
// B pattern?  0

// BinaryOp:   cij = (aij == bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator (EQ: equality comparison, float x float -> bool)
#define GB_BINOP(z,x,y,i,j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EQ || GxB_NO_FP32 || GxB_NO_EQ_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// EQ is not in that list, so this kernel is compiled out for this operator.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Body is compiled out (#if 0) for this operator; the function remains so the
// dispatch table in GB_binop__include.h stays uniform across all operators.
GrB_Info GB (_Cdense_accumB__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Body is compiled out (#if 0) for this operator, as above.
GrB_Info GB (_Cdense_accumb__eq_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion substitutes these scalars for entries missing from
        // A or B, instead of copying the other operand's value through.
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar  = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__eq_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for EQ (commutative), so only the #else branch
    // below is compiled for this operator.
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__eq_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // Bb is the bitmap: skip positions not present in B
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__eq_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Ab is the bitmap: skip positions not present in A
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = (x == aij) ;                      \
}

GrB_Info GB (_bind1st_tran__eq_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later template inclusion
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = (aij == y) ;                      \
}

GrB_Info GB (_bind2nd_tran__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ==== cache.c (file boundary from source concatenation) ==== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distribute-cache-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const Quantum *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static const void *GetVirtualMetacontentFromCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *, ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,Quantum *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), OpenPixelCacheOnDisk(CacheInfo *,const MapMode), ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), ReadPixelCacheMetacontent(CacheInfo *magick_restrict, NexusInfo *magick_restrict,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict, ExceptionInfo *); static Quantum *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *,const MapMode, const RectangleInfo *,NexusInfo *,ExceptionInfo *) magick_hot_spot; #if defined(MAGICKCORE_OPENCL_SUPPORT) static void CopyOpenCLBuffer(CacheInfo *magick_restrict); #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. 
*/ static SemaphoreInfo *cache_semaphore = (SemaphoreInfo *) NULL; static ssize_t cache_anonymous_memory = (-1); static time_t cache_epoch = 0; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCache() acquires a pixel cache. % % The format of the AcquirePixelCache() method is: % % Cache AcquirePixelCache(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. % */ MagickPrivate Cache AcquirePixelCache(const size_t number_threads) { CacheInfo *magick_restrict cache_info; char *value; cache_info=(CacheInfo *) AcquireCriticalMemory(sizeof(*cache_info)); (void) memset(cache_info,0,sizeof(*cache_info)); cache_info->type=UndefinedCache; cache_info->mode=IOMode; cache_info->disk_mode=IOMode; cache_info->colorspace=sRGBColorspace; cache_info->file=(-1); cache_info->id=GetMagickThreadId(); cache_info->number_threads=number_threads; if (GetOpenMPMaximumThreads() > cache_info->number_threads) cache_info->number_threads=GetOpenMPMaximumThreads(); if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads) cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource); if (cache_info->number_threads == 0) cache_info->number_threads=1; cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads); if (cache_info->nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); value=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (value != (const char *) NULL) { cache_info->synchronize=IsStringTrue(value); value=DestroyString(value); } value=GetPolicyValue("cache:synchronize"); if (value != (const char *) NULL) { cache_info->synchronize=IsStringTrue(value); value=DestroyString(value); } cache_info->semaphore=AcquireSemaphoreInfo(); 
cache_info->reference_count=1; cache_info->file_semaphore=AcquireSemaphoreInfo(); cache_info->debug=IsEventLogging(); cache_info->signature=MagickCoreSignature; return((Cache ) cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCacheNexus() allocates the NexusInfo structure. % % The format of the AcquirePixelCacheNexus method is: % % NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. % */ MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) { NexusInfo **magick_restrict nexus_info; register ssize_t i; nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory( number_threads,sizeof(*nexus_info))); if (nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); nexus_info[0]=(NexusInfo *) AcquireQuantumMemory(number_threads, sizeof(**nexus_info)); if (nexus_info[0] == (NexusInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(nexus_info[0],0,number_threads*sizeof(**nexus_info)); for (i=0; i < (ssize_t) number_threads; i++) { nexus_info[i]=(&nexus_info[0][i]); nexus_info[i]->signature=MagickCoreSignature; } return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCachePixels() returns the pixels associated with the specified % image. 
% % The format of the AcquirePixelCachePixels() method is: % % void *AcquirePixelCachePixels(const Image *image,size_t *length, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. % */ MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=0; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); *length=(size_t) cache_info->length; return(cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentGenesis() instantiates the cache component. % % The format of the CacheComponentGenesis method is: % % MagickBooleanType CacheComponentGenesis(void) % */ MagickPrivate MagickBooleanType CacheComponentGenesis(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) cache_semaphore=AcquireSemaphoreInfo(); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t T e r m i n u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentTerminus() destroys the cache component. 
% % The format of the CacheComponentTerminus() method is: % % CacheComponentTerminus(void) % */ MagickPrivate void CacheComponentTerminus(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&cache_semaphore); /* no op-- nothing to destroy */ RelinquishSemaphoreInfo(&cache_semaphore); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCache() clones a pixel cache. % % The format of the ClonePixelCache() method is: % % Cache ClonePixelCache(const Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickPrivate Cache ClonePixelCache(const Cache cache) { CacheInfo *magick_restrict clone_info; const CacheInfo *magick_restrict cache_info; assert(cache != NULL); cache_info=(const CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads); clone_info->virtual_pixel_method=cache_info->virtual_pixel_method; return((Cache ) clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheMethods() clones the pixel cache methods from one cache to % another. % % The format of the ClonePixelCacheMethods() method is: % % void ClonePixelCacheMethods(Cache clone,const Cache cache) % % A description of each parameter follows: % % o clone: Specifies a pointer to a Cache structure. % % o cache: the pixel cache. 
% */ MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache) { CacheInfo *magick_restrict cache_info, *magick_restrict source_info; assert(clone != (Cache) NULL); source_info=(CacheInfo *) clone; assert(source_info->signature == MagickCoreSignature); if (source_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", source_info->filename); assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); source_info->methods=cache_info->methods; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e R e p o s i t o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheRepository() clones the source pixel cache to the destination % cache. % % The format of the ClonePixelCacheRepository() method is: % % MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info, % CacheInfo *source_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o source_info: the source pixel cache. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ClonePixelCacheOnDisk( CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info) { MagickSizeType extent; size_t quantum; ssize_t count; struct stat file_stats; unsigned char *buffer; /* Clone pixel cache on disk with identical morphology. 
*/ if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) || (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse)) return(MagickFalse); quantum=(size_t) MagickMaxBufferExtent; if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0)) quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent); buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer)); if (buffer == (unsigned char *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); extent=0; while ((count=read(cache_info->file,buffer,quantum)) > 0) { ssize_t number_bytes; number_bytes=write(clone_info->file,buffer,(size_t) count); if (number_bytes != count) break; extent+=number_bytes; } buffer=(unsigned char *) RelinquishMagickMemory(buffer); if (extent != cache_info->length) return(MagickFalse); return(MagickTrue); } static MagickBooleanType ClonePixelCacheRepository( CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info, ExceptionInfo *exception) { #define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource)) #define cache_number_threads(source,destination,chunk,multithreaded) \ num_threads((multithreaded) == 0 ? 1 : \ (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \ (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? 
\ MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \ MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1)) MagickBooleanType optimize, status; NexusInfo **magick_restrict cache_nexus, **magick_restrict clone_nexus; size_t length; ssize_t y; assert(cache_info != (CacheInfo *) NULL); assert(clone_info != (CacheInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); if (cache_info->type == PingCache) return(MagickTrue); length=cache_info->number_channels*sizeof(*cache_info->channel_map); if ((cache_info->columns == clone_info->columns) && (cache_info->rows == clone_info->rows) && (cache_info->number_channels == clone_info->number_channels) && (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) && (cache_info->metacontent_extent == clone_info->metacontent_extent)) { /* Identical pixel cache morphology. */ if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) && ((clone_info->type == MemoryCache) || (clone_info->type == MapCache))) { (void) memcpy(clone_info->pixels,cache_info->pixels, cache_info->number_channels*cache_info->columns*cache_info->rows* sizeof(*cache_info->pixels)); if ((cache_info->metacontent_extent != 0) && (clone_info->metacontent_extent != 0)) (void) memcpy(clone_info->metacontent,cache_info->metacontent, cache_info->columns*cache_info->rows* clone_info->metacontent_extent*sizeof(unsigned char)); return(MagickTrue); } if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache)) return(ClonePixelCacheOnDisk(cache_info,clone_info)); } /* Mismatched pixel cache morphology. 
*/ cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads); clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads); if ((cache_nexus == (NexusInfo **) NULL) || (clone_nexus == (NexusInfo **) NULL)) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); length=cache_info->number_channels*sizeof(*cache_info->channel_map); optimize=(cache_info->number_channels == clone_info->number_channels) && (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ? MagickTrue : MagickFalse; length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns, clone_info->number_channels*clone_info->columns); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ cache_number_threads(cache_info,clone_info,cache_info->rows,1) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *pixels; RectangleInfo region; register ssize_t x; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; region.width=cache_info->columns; region.height=1; region.x=0; region.y=y; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region, cache_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; region.width=clone_info->columns; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region, clone_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length); if (optimize != MagickFalse) (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length* sizeof(Quantum)); else { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; /* Mismatched pixel channel map. 
*/ p=cache_nexus[id]->pixels; q=clone_nexus[id]->pixels; for (x=0; x < (ssize_t) cache_info->columns; x++) { register ssize_t i; if (x == (ssize_t) clone_info->columns) break; for (i=0; i < (ssize_t) clone_info->number_channels; i++) { PixelChannel channel; PixelTrait traits; channel=clone_info->channel_map[i].channel; traits=cache_info->channel_map[channel].traits; if (traits != UndefinedPixelTrait) *q=*(p+cache_info->channel_map[channel].offset); q++; } p+=cache_info->number_channels; } } status=WritePixelCachePixels(clone_info,clone_nexus[id],exception); } if ((cache_info->metacontent_extent != 0) && (clone_info->metacontent_extent != 0)) { /* Clone metacontent. */ length=(size_t) MagickMin(cache_info->metacontent_extent, clone_info->metacontent_extent); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ cache_number_threads(cache_info,clone_info,cache_info->rows,1) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *pixels; RectangleInfo region; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; region.width=cache_info->columns; region.height=1; region.x=0; region.y=y; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region, cache_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; region.width=clone_info->columns; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region, clone_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; if ((clone_nexus[id]->metacontent != (void *) NULL) && (cache_nexus[id]->metacontent != (void *) NULL)) (void) memcpy(clone_nexus[id]->metacontent, cache_nexus[id]->metacontent,length*sizeof(unsigned char)); status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception); } } cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads); 
clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      /* Trace the source => destination cache types of the clone. */
      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e s t r o y I m a g e P i x e l C a c h e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImagePixelCache() deallocates memory associated with the pixel
%  cache.  It is a no-op when the image has no cache.
%
%  The format of the DestroyImagePixelCache() method is:
%
%      void DestroyImagePixelCache(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Nothing to release if a cache was never acquired. */
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e s t r o y I m a g e P i x e l s                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImagePixels() deallocates memory associated with the pixel cache.
%
%  The format of the DestroyImagePixels() method is:
%
%      void DestroyImagePixels(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Dispatch to a cache-method override when one is installed. */
  if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL)
    {
      cache_info->methods.destroy_pixel_handler(image);
      return;
    }
  image->cache=DestroyPixelCache(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e s t r o y P i x e l C a c h e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelCache() deallocates memory associated with the pixel cache.
%
%  The format of the DestroyPixelCache() method is:
%
%      Cache DestroyPixelCache(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/

/*
  Close the backing disk file (if open) and release its file resource.
  Returns MagickTrue on success, MagickFalse when close() fails or no file
  was open.
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    status;

  status=(-1);
  if (cache_info->file != -1)
    {
      status=close(cache_info->file);
      cache_info->file=(-1);
      RelinquishMagickResource(FileResource,1);
    }
  return(status == -1 ? MagickFalse : MagickTrue);
}

/*
  Release the pixel storage of the cache according to its type, then reset
  the cache to the Undefined state.
*/
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* fallthrough: a map cache is file-backed, so it also runs the
       DiskCache teardown below to close the descriptor. */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}

MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Decrement the reference count under the cache semaphore; only the last
    reference actually releases the storage.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Release pixel storage first, then auxiliary structures. */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  /* Invalidate the signature to catch use-after-free. */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishMagickMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e s t r o y P i x e l C a c h e N e x u s                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
%  The format of the DestroyPixelCacheNexus() method is:
%
%      NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
%        const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o nexus_info: the nexus to destroy.
%
%    o number_threads: the number of nexus threads.
%
*/

/*
  Free the staging buffer of a single nexus and clear every pointer/field
  that referenced it (the buffer may be heap-allocated or memory-mapped,
  per nexus_info->mapped).
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  if (nexus_info->mapped == MagickFalse)
    (void) RelinquishAlignedMemory(nexus_info->cache);
  else
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  nexus_info->cache=(Quantum *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}

MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    i;

  assert(nexus_info != (NexusInfo **) NULL);
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    if (nexus_info[i]->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus_info[i]);
    nexus_info[i]->signature=(~MagickCoreSignature);
  }
  /*
    The NexusInfo structs were allocated as one contiguous slab anchored at
    nexus_info[0]; free that slab, then the aligned pointer array itself.
  */
  nexus_info[0]=(NexusInfo *) RelinquishMagickMemory(nexus_info[0]);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t A u t h e n t i c M e t a c o n t e n t                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticMetacontent() returns the authentic metacontent corresponding
%  with the last call to QueueAuthenticPixels() or GetVirtualPixels().  NULL is
%  returned if the associated pixels are not available.
%
%  The format of the GetAuthenticMetacontent() method is:
%
%      void *GetAuthenticMetacontent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Prefer an installed cache-method override when present. */
  if (cache_info->methods.get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    {
      void
        *metacontent;

      metacontent=cache_info->methods.
        get_authentic_metacontent_from_handler(image);
      return(metacontent);
    }
  /* Otherwise return this OpenMP thread's nexus metacontent. */
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticMetacontentFromCache() returns the meta-content corresponding
%  with the last call to QueueAuthenticPixelsCache() or
%  GetAuthenticPixelsCache().
%
%  The format of the GetAuthenticMetacontentFromCache() method is:
%
%      void *GetAuthenticMetacontentFromCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ static void *GetAuthenticMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL % operations. % % The format of the GetAuthenticOpenCLBuffer() method is: % % cl_mem GetAuthenticOpenCLBuffer(Image *image, % MagickCLDevice device,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o device: the device to use. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  /* Materialize/unshare the cache before exposing its buffer. */
  if ((cache_info->type == UndefinedCache) ||
      (cache_info->reference_count > 1))
    {
      SyncImagePixelCache(image,exception);
      cache_info=(CacheInfo *) image->cache;
    }
  /* Only an unmapped in-memory cache can back an OpenCL buffer. */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  /* Re-home the CL cache info when it belongs to a different context. */
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
    }
  if (cache_info->opencl != (MagickCLCacheInfo) NULL)
    RetainOpenCLMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return((cl_mem) NULL);
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t A u t h e n t i c P i x e l C a c h e N e x u s                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
%  disk pixel cache as defined by the geometry parameters.  A pointer to the
%  pixels is returned if the pixels are transferred, otherwise a NULL is
%  returned.
%
%  The format of the GetAuthenticPixelCacheNexus() method is:
%
%      Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to return.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict pixels;

  /*
    Transfer pixels from the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /* Reserve the nexus region; MagickTrue => clone existing pixel values. */
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* If the nexus points directly at the cache, no read-through is needed. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  if (cache_info->metacontent_extent != 0)
    if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) ==
        MagickFalse)
      return((Quantum *) NULL);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t A u t h e n t i c P i x e l s F r o m C a c h e                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelsFromCache() returns the pixels associated with the last
%  call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
%  The format of the GetAuthenticPixelsFromCache() method is:
%
%      Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
  /*
    Hand back the pixel staging area of the calling thread's cache nexus.
  */
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  return(info->nexus_info[thread_id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t A u t h e n t i c P i x e l Q u e u e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelQueue() returns the authentic pixels associated
%  corresponding with the last call to QueueAuthenticPixels() or
%  GetAuthenticPixels().
%
%  The format of the GetAuthenticPixelQueue() method is:
%
%      Quantum *GetAuthenticPixelQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Prefer an installed cache-method override when present. */
  if (cache_info->methods.get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    return(cache_info->methods.get_authentic_pixels_from_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t A u t h e n t i c P i x e l s                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixels() obtains a pixel region for read/write access.  If the
%  region is successfully accessed, a pointer to a Quantum array
%  representing the region is returned, otherwise NULL is returned.
%
%  The returned pointer may point to a temporary working copy of the pixels
%  or it may point to the original pixels in memory.  Performance is maximized
%  if the selected region is part of one row, or one or more full rows, since
%  then there is opportunity to access the pixels in-place (without a copy)
%  if the image is in memory, or in a memory-mapped file.  The returned pointer
%  must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image has corresponding metacontent, call
%  GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
%  meta-content corresponding to the region.  Once the Quantum array has
%  been updated, the changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
%  The format of the GetAuthenticPixels() method is:
%
%      Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *pixels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Prefer an installed cache-method override when present. */
  if (cache_info->methods.get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    {
      pixels=cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
        rows,exception);
      return(pixels);
    }
  assert(id < (int) cache_info->number_threads);
  pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t A u t h e n t i c P i x e l s C a c h e                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
%  as defined by the geometry parameters.  A pointer to the pixels is returned
%  if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetAuthenticPixelsCache() method is:
%
%      Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *magick_restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  /* Defensive re-check for release builds where assert is compiled out. */
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t I m a g e E x t e n t                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageExtent() returns the extent of the pixels associated corresponding
%  with the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
%  The format of the GetImageExtent() method is:
%
%      MagickSizeType GetImageExtent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t I m a g e P i x e l C a c h e                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCache() ensures that there is only a single reference to the
%  pixel cache to be modified, updating the provided cache pointer to point to
%  a clone of the original pixel cache if necessary.
%
%  The format of the GetImagePixelCache method is:
%
%      Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone: any value other than MagickFalse clones the cache pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Return MagickTrue when the image's geometry, channel layout, and related
  properties still agree with its pixel cache.
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  const PixelChannelMap
    *magick_restrict p,
    *magick_restrict q;

  /*
    Does the image match the pixel cache morphology?
  */
  cache_info=(CacheInfo *) image->cache;
  p=image->channel_map;
  q=cache_info->channel_map;
  if ((image->storage_class != cache_info->storage_class) ||
      (image->colorspace != cache_info->colorspace) ||
      (image->alpha_trait != cache_info->alpha_trait) ||
      (image->read_mask != cache_info->read_mask) ||
      (image->write_mask != cache_info->write_mask) ||
      (image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows) ||
      (image->number_channels != cache_info->number_channels) ||
      (memcmp(p,q,image->number_channels*sizeof(*p)) != 0) ||
      (image->metacontent_extent != cache_info->metacontent_extent) ||
      (cache_info->nexus_info == (NexusInfo **) NULL))
    return(MagickFalse);
  return(MagickTrue);
}

static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /* Process-wide throttle/time-limit state, initialized lazily. */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  /* Yield periodically (every 32 calls) when a CPU throttle is set. */
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=time((time_t *) NULL);
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (time((time_t *) NULL)-cache_epoch) >= cache_timelimit))
    {
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /*
    Copy-on-write: if the cache is shared or read-only, clone it so this
    image holds the sole writable reference.  The outer check is repeated
    under the cache semaphore (double-checked locking).
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          /* Stack copy of the image drives the clone's OpenPixelCache. */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  /* Swap in the clone; mark the old cache for release. */
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->type == DiskCache)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t I m a g e P i x e l C a c h e T y p e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
%  DiskCache, MemoryCache, MapCache, or PingCache.
%
%  The format of the GetImagePixelCacheType() method is:
%
%      CacheType GetImagePixelCacheType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t O n e A u t h e n t i c P i x e l                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.
%
%  The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(const Image *image,
%        const ssize_t x,const ssize_t y,Quantum *pixel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y: These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Copy one cache pixel into a caller-supplied MaxPixelChannels-sized buffer,
  mapping each channel to its canonical slot.  When source is NULL the image
  background color is written instead and MagickFalse is returned.
*/
static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  register ssize_t
    i;

  if (source == (const Quantum *) NULL)
    {
      destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
      destination[GreenPixelChannel]=ClampToQuantum(
        image->background_color.green);
      destination[BluePixelChannel]=ClampToQuantum(
        image->background_color.blue);
      destination[BlackPixelChannel]=ClampToQuantum(
        image->background_color.black);
      destination[AlphaPixelChannel]=ClampToQuantum(
        image->background_color.alpha);
      return(MagickFalse);
    }
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    destination[channel]=source[i];
  }
  return(MagickTrue);
}

MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  register Quantum
    *magick_restrict q;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Zero the output so unpopulated channels are deterministic. */
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,
      pixel,exception));
  q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  return(CopyPixel(image,q,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t O n e A u t h e n t i c P i x e l F r o m C a c h e                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixelFromCache() returns a single pixel at the specified
%  (x,y) location.  The image background color is returned if an error occurs.
%
%  The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(const Image *image,
%        const ssize_t x,const ssize_t y,Quantum *pixel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y: These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  register Quantum
    *magick_restrict q;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id],
    exception);
  return(CopyPixel(image,q,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t O n e V i r t u a l P i x e l                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixel() returns a single virtual pixel at the specified
%  (x,y) location.  The image background color is returned if an error occurs.
%  If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,
%        const ssize_t x,const ssize_t y,Quantum *pixel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y: These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Zero the output so unpopulated channels are deterministic. */
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t O n e V i r t u a l P i x e l F r o m C a c h e                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelFromCache() returns a single virtual pixel at the
%  specified (x,y) location.  The image background color is returned if an
%  error occurs.
%
%  The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
%        const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
%        Quantum *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y: These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *virtual_pixels;

  /*
    Read a 1x1 virtual region with the requested virtual-pixel method through
    this thread's cache nexus and copy the result into the caller's buffer.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  virtual_pixels=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,
    1UL,cache_info->nexus_info[id],exception);
  return(CopyPixel(image,virtual_pixels,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e V i r t u a l P i x e l I n f o                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.  If
%  you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualPixelInfo() method is:
%
%      MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y: these values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  register const Quantum
    *magick_restrict virtual_pixels;

  /*
    Initialize the PixelInfo from the image, fetch one virtual pixel through
    this thread's cache nexus, and convert it into the PixelInfo form.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  GetPixelInfo(image,pixel);
  virtual_pixels=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,
    1UL,cache_info->nexus_info[id],exception);
  if (virtual_pixels == (const Quantum *) NULL)
    return(MagickFalse);
  GetPixelInfoPixel(image,virtual_pixels,pixel);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e C o l o r s p a c e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
%  The format of the GetPixelCacheColorspace() method is:
%
%      ColorspaceType GetPixelCacheColorspace(const Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
% */ MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheFilename() returns the filename associated with the pixel % cache. % % The format of the GetPixelCacheFilename() method is: % % const char *GetPixelCacheFilename(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const char *GetPixelCacheFilename(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->cache_filename); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheMethods() initializes the CacheMethods structure. % % The format of the GetPixelCacheMethods() method is: % % void GetPixelCacheMethods(CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache_methods: Specifies a pointer to a CacheMethods structure. 
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Populate the method table with the default cache handlers; zero it first
    so any handler slot not set here is a well-defined NULL.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /*
    Virtual (read-only) pixel handlers.
  */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  /*
    Authentic (read-write) pixel handlers.
  */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  /*
    Queue, sync, and teardown handlers.
  */
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e N e x u s E x t e n t                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheNexusExtent() returns the extent of the pixels associated
%  with the last call to SetPixelCacheNexusPixels() or
%  GetPixelCacheNexusPixels().
%
%  The format of the GetPixelCacheNexusExtent() method is:
%
%      MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o nexus_info: the nexus info.
%
%    o cache: the pixel cache.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    extent;

  /*
    Compute the nexus region extent in pixels; a zero extent means no region
    has been selected yet, so fall back to the full cache extent.
  */
  assert(cache != (Cache) NULL);  /* was bare NULL; match file-wide convention */
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent == 0)
    return((MagickSizeType) cache_info->columns*cache_info->rows);
  return(extent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e P i x e l s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCachePixels() returns the pixels associated with the specified
%  image.
%
%  The format of the GetPixelCachePixels() method is:
%
%      void *GetPixelCachePixels(Image *image,MagickSizeType *length,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o length: the pixel cache length.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); assert(length != (MagickSizeType *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=cache_info->length; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); return((void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheStorageClass() returns the class type of the pixel cache. % % The format of the GetPixelCacheStorageClass() method is: % % ClassType GetPixelCacheStorageClass(Cache cache) % % A description of each parameter follows: % % o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass. % % o cache: the pixel cache. % */ MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->storage_class); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e T i l e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheTileSize() returns the pixel cache tile size. 
%
%  The format of the GetPixelCacheTileSize() method is:
%
%      void GetPixelCacheTileSize(const Image *image,size_t *width,
%        size_t *height)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the optimized cache tile width in pixels.
%
%    o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict cache_info;

  size_t
    extent;

  /*
    Choose a square tile sized to a byte budget per tile row: 2048 bytes for
    in-core caches, 8192 bytes for disk caches (larger tiles amortize I/O).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=cache_info->number_channels*sizeof(Quantum);
  *width=2048UL/extent;
  if (GetImagePixelCacheType(image) == DiskCache)
    *width=8192UL/extent;
  if (*width == 0)
    *width=1;  /* per-pixel size exceeded the budget; never return a 0 tile */
  *height=(*width);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e V i r t u a l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
%  pixel cache.  A virtual pixel is any pixel access that is outside the
%  boundaries of the image cache.
%
%  The format of the GetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->virtual_pixel_method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromCache() returns the meta-content corresponding with % the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualMetacontentFromCache() method is: % % void *GetVirtualMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const void *GetVirtualMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromNexus() returns the meta-content for the specified % cache nexus. 
%
%  The format of the GetVirtualMetacontentFromNexus() method is:
%
%      const void *GetVirtualMetacontentFromNexus(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    A cache whose storage class is still undefined carries no meta-content;
    otherwise hand back the nexus' meta-content pointer.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class == UndefinedClass)
    return((void *) NULL);
  return(nexus_info->metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l M e t a c o n t e n t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualMetacontent() returns the virtual metacontent corresponding with
%  the last call to QueueAuthenticPixels() or GetVirtualPixels().  NULL is
%  returned if the meta-content are not available.
%
%  The format of the GetVirtualMetacontent() method is:
%
%      const void *GetVirtualMetacontent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const void
    *magick_restrict metacontent;

  /*
    Prefer the installed handler's meta-content; when the handler yields
    nothing, fall back to this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
  if (metacontent != (void *) NULL)
    return(metacontent);
  assert(id < (int) cache_info->number_threads);
  metacontent=GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]);
  return(metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l s F r o m N e x u s                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsFromNexus() gets virtual pixels from the in-memory or disk
%  pixel cache as defined by the geometry parameters.  A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetVirtualPixelsFromNexus() method is:
%
%      Quantum *GetVirtualPixelsFromNexus(const Image *image,
%        const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
%        const size_t columns,const size_t rows,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to acquire.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  8x8 ordered-dither offset table used by the Dither virtual-pixel method to
  perturb out-of-bounds coordinates before clamping.
*/
static ssize_t
  DitherMatrix[64] =
  {
     0,  48,  12,  60,   3,  51,  15,  63,
    32,  16,  44,  28,  35,  19,  47,  31,
     8,  56,   4,  52,  11,  59,   7,  55,
    40,  24,  36,  20,  43,  27,  39,  23,
     2,  50,  14,  62,   1,  49,  13,  61,
    34,  18,  46,  30,  33,  17,  45,  29,
    10,  58,   6,  54,   9,  57,   5,  53,
    42,  26,  38,  22,  41,  25,  37,  21
  };

/* Dither the x coordinate, then clamp it into [0,columns-1]. */
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    index;

  index=x+DitherMatrix[x & 0x07]-32L;
  if (index < 0L)
    return(0L);
  if (index >= (ssize_t) columns)
    return((ssize_t) columns-1L);
  return(index);
}

/* Dither the y coordinate, then clamp it into [0,rows-1]. */
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    index;

  index=y+DitherMatrix[y & 0x07]-32L;
  if (index < 0L)
    return(0L);
  if (index >= (ssize_t) rows)
    return((ssize_t) rows-1L);
  return(index);
}

/* Clamp the x coordinate into [0,columns-1] (Edge virtual-pixel method). */
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x < 0L)
    return(0L);
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x);
}

/* Clamp the y coordinate into [0,rows-1] (Edge virtual-pixel method). */
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y < 0L)
    return(0L);
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y);
}

/* Pick a pseudo-random column index in [0,columns). */
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  return((ssize_t) (columns*GetPseudoRandomValue(random_info)));
}

/* Pick a pseudo-random row index in [0,rows). */
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  return((ssize_t) (rows*GetPseudoRandomValue(random_info)));
}

static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  /*
    Compute the remainder of dividing offset by extent.  It returns not only
    the quotient (tile the offset falls in) but also the positive remainder
    within that tile such that 0 <= remainder < extent.  This method is
    essentially a ldiv() using a floored modulo division rather than the
    normal default truncated modulo division.
  */
  modulo.quotient=offset/(ssize_t) extent;
  if (offset < 0L)
    modulo.quotient--;
  modulo.remainder=offset-modulo.quotient*(ssize_t) extent;
  return(modulo);
}

MagickPrivate const Quantum *GetVirtualPixelsFromNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    **magick_restrict virtual_nexus;

  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];

  RectangleInfo
    region;

  register const Quantum
    *magick_restrict p;

  register const void
    *magick_restrict r;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i,
    u;

  register unsigned char
    *magick_restrict s;

  ssize_t
    v;

  void
    *magick_restrict virtual_metacontent;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,nexus_info,
    exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents: synthesize the out-of-bounds
    pixels one region at a time, recursing on a scratch nexus for the
    in-bounds sources.
  */
  s=(unsigned char *) nexus_info->metacontent;
  virtual_nexus=AcquirePixelCacheNexus(1);
  if (virtual_nexus == (NexusInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "UnableToGetCacheNexus","`%s'",image->filename);
      return((const Quantum *) NULL);
    }
  (void) memset(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  switch (virtual_pixel_method)
  {
    /*
      Methods that resolve to one constant pixel value: precompute it (and a
      zeroed metacontent buffer) once before the transfer loop.
    */
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
        }
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          /* BackgroundVirtualPixelMethod and friends: image background. */
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    /* NOTE: `length` doubles as the inner-loop stride (pixels transferred
       per iteration); it is 1 for out-of-bounds singles, or the in-bounds
       run length otherwise. */
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              /* Random source is created lazily on first use. */
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                *virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* Odd tiles are reflected so the pattern mirrors at edges. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                *virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                *virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /* Constant-value methods: use the precomputed pixel. */
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          (void) memcpy(q,p,(size_t) length*cache_info->number_channels*
            sizeof(*p));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,*virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,*virtual_nexus);
      (void) memcpy(q,p,(size_t) length*cache_info->number_channels*sizeof(*p));
      q+=length*cache_info->number_channels;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          (void) memcpy(s,r,(size_t) length);
          s+=length*cache_info->metacontent_extent;
        }
    }
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l C a c h e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
%  cache as defined by the geometry parameters.  A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetVirtualPixelCache() method is:
%
%      const Quantum *GetVirtualPixelCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Thin adapter: read the requested virtual region through this thread's
    cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,columns,
    rows,cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l P i x e l Q u e u e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels associated corresponding
%  with the last call to QueueAuthenticPixels() or GetVirtualPixels().
%
%  The format of the GetVirtualPixelQueue() method is:
%
%      const Quantum *GetVirtualPixelQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Delegate to the installed virtual-pixels handler when one is registered;
    otherwise return the pixels held by this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixels_handler !=
       (GetVirtualPixelsHandler) NULL)
    return(cache_info->methods.get_virtual_pixels_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l P i x e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixels() returns an immutable pixel region.  If the
%  region is successfully accessed, a pointer to it is returned, otherwise
%  NULL is returned.  The returned pointer may point to a temporary working
%  copy of the pixels or it may point to the original pixels in memory.
%  Performance is maximized if the selected region is part of one row, or one
%  or more full rows, since there is opportunity to access the pixels in-place
%  (without a copy) if the image is in memory, or in a memory-mapped file.  The
%  returned pointer must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
%  access the meta-content (of type void) corresponding to the
%  region.
%
%  If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
%  Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not
%  thread-safe.
%  In a threaded environment, use GetCacheViewVirtualPixels() or
%  GetCacheViewAuthenticPixels() instead.
%
%  The format of the GetVirtualPixels() method is:
%
%      const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* An installed cache-method handler takes precedence over the built-in
     per-thread nexus path. */
  if (cache_info->methods.get_virtual_pixel_handler !=
       (GetVirtualPixelHandler) NULL)
    return(cache_info->methods.get_virtual_pixel_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,cache_info->nexus_info[id],exception);
  return(p);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l s F r o m C a c h e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsCache() returns the pixels associated with the
%  last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
%  The format of the GetVirtualPixelsCache() method is:
%
%      Quantum *GetVirtualPixelsCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* Return the pixels of the nexus owned by the calling OpenMP thread. */
  return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l s N e x u s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsNexus() returns the pixels associated with the specified
%  cache nexus.
%
%  The format of the GetVirtualPixelsNexus() method is:
%
%      const Quantum *GetVirtualPixelsNexus(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* An undefined storage class means the cache holds no pixels yet. */
  if (cache_info->storage_class == UndefinedClass)
    return((Quantum *) NULL);
  return((const Quantum *) nexus_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   O p e n P i x e l C a c h e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OpenPixelCache() allocates the pixel cache.  This includes defining the
%  cache dimensions, allocating space for the image pixels and optionally the
%  metacontent, and memory mapping the cache if it is disk based.  The cache
%  nexus array is initialized as well.
%
%  The format of the OpenPixelCache() method is:
%
%      MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  /* An empty filename means no backing file has been reserved yet. */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* Try exclusive create first; fall back to opening an existing file. */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  /* Close any previously-open descriptor before installing the new one. */
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}

/*
  Write `length' bytes of `buffer' to the cache file starting at `offset'.
  Returns the number of bytes written, retrying on EINTR; uses pwrite() when
  available, otherwise lseek()+write().
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard error: give up, return bytes written so far */
      }
  }
  return(i);
}

/*
  Grow the on-disk cache file to at least `length' bytes by writing a single
  byte at length-1 (sparse extension), optionally preallocating with
  posix_fallocate().  Leaves the file offset at the beginning of the file.
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];

      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Reject lengths that do not round-trip through a signed offset. */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* file is already large enough */
  else
    {
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      if (cache_info->synchronize != MagickFalse)
        (void) posix_fallocate(cache_info->file,offset+1,extent-offset);
#endif
    }
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}

static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MagickPathExtent],
    message[MagickPathExtent];

  const char
    *hosts,
    *type;

  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void)
      LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      /* NOTE(review): if both policy lookups return NULL, `value' is NULL
         here -- presumably LocaleCompare() and DestroyString() tolerate NULL;
         confirm against their implementations. */
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if ((AcquireMagickResource(WidthResource,image->columns) == MagickFalse) ||
      (AcquireMagickResource(HeightResource,image->rows) == MagickFalse))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  length=GetImageListLength(image);
  if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
      image->filename);
  /*
    Snapshot the current cache so its pixels can be cloned into (and then
    released from) the newly-opened cache below.
  */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->read_mask=image->read_mask;
  cache_info->write_mask=image->write_mask;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=cache_info->number_channels*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  length=number_pixels*packet_size;
  /* Recompute columns from the product to detect arithmetic overflow. */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /* Ping mode: record geometry only, allocate no pixels. */
      cache_info->storage_class=image->storage_class;
      cache_info->colorspace=image->colorspace;
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(Quantum *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (Quantum *) NULL)
            cache_info->pixels=source_info.pixels;
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  number_pixels*cache_info->number_channels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(status == 0 ? MagickFalse : MagickTrue);
            }
        }
    }
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;

      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(status == 0 ? MagickFalse : MagickTrue);
            }
        }
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  /* Memory-map the disk cache only if the length fits in a size_t. */
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;
  else
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status == MagickFalse)
        cache_info->type=DiskCache;
      else
        if ((cache_info->type != MapCache) && (cache_info->type != MemoryCache))
          {
            cache_info->type=DiskCache;
            RelinquishMagickResource(MapResource,cache_info->length);
          }
        else
          {
            cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
              cache_info->offset,(size_t) cache_info->length);
            if (cache_info->pixels == (Quantum *) NULL)
              {
                cache_info->type=DiskCache;
                cache_info->pixels=source_info.pixels;
                RelinquishMagickResource(MapResource,cache_info->length);
              }
            else
              {
                /*
                  Create file-backed memory-mapped pixel cache.
                */
                (void) ClosePixelCacheOnDisk(cache_info);
                cache_info->type=MapCache;
                cache_info->mapped=MagickTrue;
                cache_info->metacontent=(void *) NULL;
                if (cache_info->metacontent_extent != 0)
                  cache_info->metacontent=(void *) (cache_info->pixels+
                    number_pixels*cache_info->number_channels);
                if ((source_info.storage_class != UndefinedClass) &&
                    (mode != ReadMode))
                  {
                    status=ClonePixelCacheRepository(cache_info,&source_info,
                      exception);
                    RelinquishPixelCachePixels(&source_info);
                  }
                if (image->debug != MagickFalse)
                  {
                    (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                      MagickPathExtent,format);
                    type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                      cache_info->type);
                    (void) FormatLocaleString(message,MagickPathExtent,
                      "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                      cache_info->filename,cache_info->cache_filename,
                      cache_info->file,type,(double) cache_info->columns,
                      (double) cache_info->rows,(double)
                      cache_info->number_channels,format);
                    (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                      message);
                  }
                return(status == 0 ? MagickFalse : MagickTrue);
              }
          }
    }
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r s i s t P i x e l C a c h e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PersistPixelCache() attaches to or initializes a persistent pixel cache.  A
%  persistent pixel cache is one that resides on disk and is not destroyed
%  when the program exits.
%
%  The format of the PersistPixelCache() method is:
%
%      MagickBooleanType PersistPixelCache(Image *image,const char *filename,
%        const MagickBooleanType attach,MagickOffsetType *offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero initializes the persistent pixel cache.
%
%    o initialize: A value other than zero initializes the persistent pixel
%      cache.
%
%    o offset: the offset in the persistent cache to store pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=DiskCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /* Advance the shared offset to the next page boundary past this cache. */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(SyncImagePixelCache(image,exception));
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,
    MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->read_mask=cache_info->read_mask;
  clone_info->write_mask=cache_info->write_mask;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates an region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelCacheNexus() method is:
%
%      Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        const MagickBooleanType clone,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to set.
%
%    o clone: clone the pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  RectangleInfo
    region;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);  /* offset arithmetic overflowed */
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /* Reject regions whose bottom-right corner falls outside the image. */
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,&region,nexus_info,
    exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e A u t h e n t i c P i x e l s C a c h e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates an region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelsCache() method is:
%
%      Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *magick_restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* Queue via the nexus reserved for the calling OpenMP thread. */
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u e u e A u t h e n t i c P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixels() queues a mutable pixel region.  If the region is
%  successfully initialized a pointer to a Quantum array representing the
%  region is returned, otherwise NULL is returned.  The returned pointer may
%  point to a temporary working buffer for the pixels or it may point to the
%  final location of the pixels in memory.
%
%  Write-only access means that any existing pixel values corresponding to
%  the region are ignored.  This is useful if the initial image is being
%  created from scratch, or if the existing pixel values are to be
%  completely replaced without need to refer to their pre-existing values.
%  The application is free to read and write the pixel buffer returned by
%  QueueAuthenticPixels() any way it pleases.  QueueAuthenticPixels() does not
%  initialize the pixel array values.  Initializing pixel array values is the
%  application's responsibility.
%
%  Performance is maximized if the selected region is part of one row, or
%  one or more full rows, since then there is opportunity to access the
%  pixels in-place (without a copy) if the image is in memory, or in a
%  memory-mapped file.  The returned pointer must *never* be deallocated
%  by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
%  obtain the meta-content (of type void) corresponding to the region.
%  Once the Quantum (and/or metacontent) array has been updated, the
%  changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
%  The format of the QueueAuthenticPixels() method is:
%
%      Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *magick_restrict pixels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* An installed cache-method handler overrides the default nexus path. */
  if (cache_info->methods.queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    {
      pixels=cache_info->methods.queue_authentic_pixels_handler(image,x,y,
        columns,rows,exception);
      return(pixels);
    }
  assert(id < (int) cache_info->number_threads);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d P i x e l C a c h e M e t a c o n t e n t                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCacheMetacontent() reads metacontent from the specified region of
%  the pixel cache.
%
%  The format of the ReadPixelCacheMetacontent() method is:
%
%      MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the metacontent.
%
%    o exception: return any errors or warnings in this structure.
% */
/*
  Read `length' bytes from the cache file starting at `offset' into `buffer'.
  Returns the number of bytes read, retrying on EINTR; uses pread() when
  available, otherwise lseek()+read().
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard error or EOF: return bytes read so far */
      }
  }
  return(i);
}

static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register ssize_t
    y;

  register unsigned char
    *magick_restrict q;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* Nexus already aliases the authentic pixels: nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict p;

      /*
        Read meta-content from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width region: copy all rows with a single memcpy. */
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* On disk, metacontent is stored after all pixel data. */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* y short of rows indicates an aborted transfer above. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d P i x e l C a c h e P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCachePixels() reads pixels from the specified region of the pixel
%  cache.
%
%  The format of the ReadPixelCachePixels() method is:
%
%      MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  ReadPixelCachePixels() copies the pixels of the nexus region from the
  backing store (memory, memory-mapped file, disk file, or distributed cache
  server) into the nexus staging buffer.  Returns MagickTrue on success.
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register Quantum
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    number_channels,
    rows;

  /* Nexus already aliases the authentic cache storage; nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  /* Division round-trip detects overflow in the row-offset multiply. */
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  /* Same round-trip overflow guard for the per-row byte length. */
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict p;

      /*
        Read pixels from memory.  Full-width regions collapse to a single
        memcpy when the total extent fits in size_t.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+offset*cache_info->number_channels;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk, one region row per request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache, row-at-a-time unless the whole
        region is contiguous and within the transfer limit.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* An early break above leaves y < rows, signalling a short transfer. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e f e r e n c e   P i x e l   C a c h e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReferencePixelCache() increments the reference count associated with the
%  pixel cache returning a pointer to the cache.
%
%  The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache_info)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Reference count is guarded by the cache's own semaphore. */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t   P i x e l   C a c h e   C h a n n e l s                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetPixelCacheChannels() resets the pixel cache channels.
%
%  The format of the ResetPixelCacheChannels method is:
%
%      void ResetPixelCacheChannels(Image *)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickPrivate void ResetPixelCacheChannels(Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); cache_info->number_channels=GetPixelChannels(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t C a c h e A n o n y m o u s M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetCacheAnonymousMemory() resets the anonymous_memory value. % % The format of the ResetCacheAnonymousMemory method is: % % void ResetCacheAnonymousMemory(void) % */ MagickPrivate void ResetCacheAnonymousMemory(void) { cache_anonymous_memory=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t P i x e l C a c h e E p o c h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetPixelCacheEpoch() resets the pixel cache epoch. % % The format of the ResetPixelCacheEpoch method is: % % void ResetPixelCacheEpoch(void) % */ MagickPrivate void ResetPixelCacheEpoch(void) { cache_epoch=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheMethods() sets the image pixel methods to the specified ones. % % The format of the SetPixelCacheMethods() method is: % % SetPixelCacheMethods(Cache *,CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache: the pixel cache. % % o cache_methods: Specifies a pointer to a CacheMethods structure. 
%
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.  Each handler is overridden only when the
    incoming CacheMethods structure supplies a non-NULL replacement, so a
    partially-populated structure leaves the other handlers untouched.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    NOTE(review): the guard below tests the handler *currently installed* in
    cache_info->methods, whereas the authentic counterpart further down tests
    the *incoming* cache_methods value.  This asymmetry means a NULL incoming
    handler can clear an installed virtual handler — confirm whether that is
    intentional before changing it.
  */
  get_one_virtual_pixel_from_handler=
    cache_info->methods.get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t   P i x e l   C a c h e   N e x u s   P i x e l s                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheNexusPixels() defines the region of the cache for the
%  specified cache nexus.
%
%  The format of the SetPixelCacheNexusPixels() method is:
%
%      Quantum SetPixelCacheNexusPixels(const CacheInfo *cache_info,
%        const MapMode mode,const RectangleInfo *region,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o region: A pointer to the RectangleInfo structure that defines the
%      region of this particular cache nexus.
%
%    o nexus_info: the cache nexus to set.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  AcquireCacheNexusPixels() allocates nexus_info->length bytes of staging
  storage for the nexus, either from the heap (aligned allocation, zeroed) or
  as an anonymous memory map, depending on the cache_anonymous_memory policy.
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  /* Reject lengths that cannot be represented as a size_t on this platform. */
  if (nexus_info->length != (MagickSizeType) ((size_t) nexus_info->length))
    return(MagickFalse);
  if (cache_anonymous_memory <= 0)
    {
      nexus_info->mapped=MagickFalse;
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) nexus_info->length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) nexus_info->length);
    }
  else
    {
      nexus_info->mapped=MagickTrue;
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
        nexus_info->length);
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  return(MagickTrue);
}

/*
  IsPixelCacheAuthentic() reports whether the nexus aliases the in-core cache
  pixels directly (authentic) or points at a separate staging buffer, by
  comparing the nexus pixel pointer with the computed in-cache address.
*/
static inline MagickBooleanType IsPixelCacheAuthentic(
  const CacheInfo *magick_restrict cache_info,
  const NexusInfo *magick_restrict nexus_info)
{
  MagickBooleanType
    status;

  MagickOffsetType
    offset;

  /*
    Does nexus pixels point directly to in-core cache pixels or is it
    buffered?
  */
  if (cache_info->type == PingCache)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  status=nexus_info->pixels == (cache_info->pixels+offset*
    cache_info->number_channels) ? MagickTrue : MagickFalse;
  return(status);
}

/*
  PrefetchPixelCacheNexusPixels() hints the CPU cache about the upcoming
  access pattern: read-only for ReadMode, read/write otherwise.
*/
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  if (mode == ReadMode)
    {
      MagickCachePrefetch((unsigned char *) nexus_info->pixels,0,1);
      return;
    }
  MagickCachePrefetch((unsigned char *) nexus_info->pixels,1,1);
}

static Quantum *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
  const MapMode mode,const RectangleInfo *region,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  if ((region->width == 0) || (region->height == 0))
    return((Quantum *) NULL);
  nexus_info->region=(*region);
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  if (number_pixels == 0)
    return((Quantum *) NULL);
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      ssize_t
        x,
        y;

      x=nexus_info->region.x+(ssize_t) nexus_info->region.width-1;
      y=nexus_info->region.y+(ssize_t) nexus_info->region.height-1;
      /*
        The nexus can alias the cache directly only when the region lies
        entirely inside the image AND is addressable as one contiguous run:
        a single row, or full-width rows starting at column 0.
      */
      if (((nexus_info->region.x >= 0) && (x < (ssize_t) cache_info->columns) &&
           (nexus_info->region.y >= 0) && (y < (ssize_t) cache_info->rows)) &&
          ((nexus_info->region.height == 1UL) || ((nexus_info->region.x == 0) &&
           ((nexus_info->region.width == cache_info->columns) ||
            ((nexus_info->region.width % cache_info->columns) == 0)))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
            nexus_info->region.x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          nexus_info->authentic_pixel_cache=IsPixelCacheAuthentic(cache_info,
            nexus_info);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
    Reuse the existing staging buffer when it is already large enough.
  */
  length=number_pixels*cache_info->number_channels*sizeof(Quantum);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  if (nexus_info->cache == (Quantum *) NULL)
    {
      nexus_info->length=length;
      status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
      if (status == MagickFalse)
        {
          nexus_info->length=0;
          return((Quantum *) NULL);
        }
    }
  else
    if (nexus_info->length < length)
      {
        RelinquishCacheNexusPixels(nexus_info);
        nexus_info->length=length;
        status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          {
            nexus_info->length=0;
            return((Quantum *) NULL);
          }
      }
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  /* Meta-content is stored immediately after the pixel data in the buffer. */
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+number_pixels*
      cache_info->number_channels);
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  nexus_info->authentic_pixel_cache=IsPixelCacheAuthentic(cache_info,
    nexus_info);
  return(nexus_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   P i x e l   C a c h e   V i r t u a l   M e t h o d               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
%  pixel cache and returns the previous setting.
A virtual pixel is any pixel
%  access that is outside the boundaries of the image cache.
%
%  The format of the SetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
%        const VirtualPixelMethod virtual_pixel_method,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: choose the type of virtual pixel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  SetCacheAlphaChannel() assigns the given alpha value to every pixel of the
  image and marks the image as carrying an alpha channel (BlendPixelTrait).
  Rows are processed in parallel when OpenMP is available.
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);  /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* A prior row failure short-circuits the remaining iterations. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Remember the previous method; it is the return value. */
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* Materialize an alpha channel if the background color needs one. */
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        /* A non-gray background forces a gray image into sRGB. */
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}

#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   A u t h e n t i c   O p e n C L   B u f f e r                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
%  been completed and updates the host memory.
%
%  The format of the SyncAuthenticOpenCLBuffer() method is:
%
%      void SyncAuthenticOpenCLBuffer(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/

/*
  CopyOpenCLBuffer() synchronizes the host copy of an OpenCL-backed memory
  cache; a no-op for other cache types or when no OpenCL buffer is attached.
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->type != MemoryCache) ||
      (cache_info->opencl == (MagickCLCacheInfo) NULL))
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}

MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  cache_info=(CacheInfo *) image->cache;
  CopyOpenCLBuffer(cache_info);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   A u t h e n t i c   P i x e l   C a c h e   N e x u s           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
%  in-memory or disk cache.  The method returns MagickTrue if the pixel region
%  is synced, otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelCacheNexus() method is:
%
%      MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to sync.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /* An authentic nexus aliases the cache directly: nothing to write back. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) ==
       MagickFalse))
    return(MagickFalse);
  /* A successful sync marks the image as modified (tainted). */
  if (status != MagickFalse)
    image->taint=MagickTrue;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   A u t h e n t i c   P i x e l   C a c h e                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelsCache() saves the authentic image pixels to the
%  in-memory or disk cache.  The method returns MagickTrue if the pixel region
%  is synced, otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelsCache() method is:
%
%      MagickBooleanType SyncAuthenticPixelsCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  Sync the calling thread's nexus back to the pixel cache.
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache=(CacheInfo *) image->cache;
  assert(cache->signature == MagickCoreSignature);
  assert(thread_id < (int) cache->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache->nexus_info[thread_id],
    exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c   A u t h e n t i c   P i x e l s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixels() saves the image pixels to the in-memory or disk
%  cache.  The method returns MagickTrue if the pixel region is flushed,
%  otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixels() method is:
%
%      MagickBooleanType SyncAuthenticPixels(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Delegate to an installed handler, if any; otherwise use this thread's
     nexus. */
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    {
      status=cache_info->methods.sync_authentic_pixels_handler(image,
        exception);
      return(status);
    }
  assert(id < (int) cache_info->number_threads);
  status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   I m a g e   P i x e l   C a c h e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImagePixelCache() saves the image pixels to the in-memory or disk
%  cache.  The method returns MagickTrue if the pixel region is flushed,
%  otherwise MagickFalse.
%
%  The format of the SyncImagePixelCache() method is:
%
%      MagickBooleanType SyncImagePixelCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e   P i x e l   C a c h e   M e t a c o n t e n t                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCacheMetacontent() writes the meta-content to the specified
%  region of the pixel cache.
%
%  The format of the WritePixelCacheMetacontent() method is:
%
%      MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the meta-content.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const unsigned char
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* Authentic nexus already aliases cache storage; nothing to write back. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.  Full-width regions collapse to a
        single memcpy when the extent fits in size_t.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /*
        extent is repurposed as the total pixel count: the meta-content
        section of the cache file starts after all pixel data.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache, row-at-a-time unless the
        whole region is contiguous and within the transfer limit.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* An early break above leaves y < rows, signalling a short transfer. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e   C a c h e   P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCachePixels() writes image pixels to the specified region of the
%  pixel cache.
%
%  The format of the WritePixelCachePixels() method is:
%
%      MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  WritePixelCachePixels() flushes the pixel data held in a cache nexus back to
  the authentic pixel cache backing store (memory, memory-mapped file, local
  disk, or a distributed cache server), honoring the nexus region geometry.
  Returns MagickTrue on success; MagickFalse if any row fails to be written
  (an exception is raised via ThrowFileException in that case).
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const Quantum
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /*
    If the nexus aliases the authentic cache directly, the pixels are already
    in place and no copy is required.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /*
    offset: linear pixel offset of the region origin within the full-frame
    cache; length: bytes in one region row; extent: bytes in the whole region.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.  When the region spans full cache rows and the
        total extent fits in a size_t, collapse the copy into one memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+offset*cache_info->number_channels;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.  file_semaphore serializes access to the shared
        file descriptor for the duration of the transfer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      /*
        Full-width regions within the buffer limit are written in one call.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;  /* short write: fall through to the error check below */
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      /*
        Release the descriptor early when the process is near its fd limit.
      */
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache.  Partial-width (or oversized)
        regions are sent one row at a time (region.height forced to 1 and
        region.y advanced per iteration); otherwise the whole extent goes in
        a single request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    y < rows means a row transfer above broke out early: report the failure.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double)
      nexus_info->region.y);
  return(MagickTrue);
}
parallel-simple.c
/* * parallel-simple.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run | FileCheck %s // REQUIRES: tsan #include <omp.h> #include <stdio.h> int main(int argc, char *argv[]) { int var = 0; #pragma omp parallel num_threads(2) shared(var) { if (omp_get_thread_num() == 1) { var++; } } // implicit barrier var++; fprintf(stderr, "DONE\n"); int error = (var != 2); return error; } // CHECK-NOT: ThreadSanitizer: data race // CHECK-NOT: ThreadSanitizer: reported // CHECK-NOT: Warning: please export TSAN_OPTIONS // CHECK: DONE
opi.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> int main(int argc, char **argv) { //seed random number generator // Q2b: get the number of threads to run with from agrv and // add OpenMP API code to set number of threads here int Nthreads = atoi(argv[1]); omp_set_num_threads(Nthreads); struct drand48_data *drandData; drandData = (struct drand48_data*) malloc(Nthreads*sizeof(struct drand48_data)); // Q2c: add an OpenMP parallel region here, wherein each thread initializes // one entry in drandData using srand48_r and seed based on thread number #pragma omp parallel { int rank = omp_get_thread_num(); long int seed = rank; srand48_r(seed, drandData+rank); } long long int Ntrials = 10000000; //need running tallies long long int Ntotal=0; long long int Ncircle=0; double startTime = omp_get_wtime(); #pragma omp parallel for reduction(+:Ncircle) for( long long int n=0; n<Ntrials; n++) { int rank = omp_get_thread_num(); double rand1; double rand2; //gererate two random numbers (use the thread id to offset drandData) drand48_r(drandData+rank, &rand1); drand48_r(drandData+rank, &rand2); double x = -1 + 2*rand1; //shift to [-1,1] double y = -1 + 2*rand2; //check if its in the circle if (sqrt(x*x+y*y)<=1) Ncircle++; Ntotal++; if (n%100 ==0) { double pi = 4.0*Ncircle/ (double) (n); // printf("Our estimate of pi is %g \n", pi); } // double runTime = omp_get_wtime(); // printf("The run time was %f \n", runTime-startTime); } double runTime = omp_get_wtime(); printf("The run time was %f \n", runTime-startTime); double pi = 4.0*Ncircle/ (double) (Ntotal); printf("Our final estimate of pi is %g \n", pi); }
convolution_sgemm.h
// BUG1989 is pleased to support the open source community by supporting ncnn available. // // Copyright (C) 2019 BUG1989. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __AVX__ static void conv_im2col_sgemm_transform_kernel_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size) { const float* kernel = _kernel; // kernel memory packed 8 x 8 kernel_tm.create(8*kernel_size, inch, outch/8 + (outch%8)/4 + outch%4); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; const float* k0 = kernel + (p+0)*inch*kernel_size; const float* k1 = kernel + (p+1)*inch*kernel_size; const float* k2 = kernel + (p+2)*inch*kernel_size; const float* k3 = kernel + (p+3)*inch*kernel_size; const float* k4 = kernel + (p+4)*inch*kernel_size; const float* k5 = kernel + (p+5)*inch*kernel_size; const float* k6 = kernel + (p+6)*inch*kernel_size; const float* k7 = kernel + (p+7)*inch*kernel_size; float* ktmp = kernel_tm.channel(p/8); for (int q=0; q<inch*kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp[4] = k4[0]; ktmp[5] = k5[0]; ktmp[6] = k6[0]; ktmp[7] = k7[0]; ktmp += 8; k0 += 1; k1 += 1; k2 += 1; k3 += 1; k4 += 1; k5 += 1; k6 += 1; k7 += 1; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; const float* k0 = kernel + (p+0)*inch*kernel_size; 
const float* k1 = kernel + (p+1)*inch*kernel_size; const float* k2 = kernel + (p+2)*inch*kernel_size; const float* k3 = kernel + (p+3)*inch*kernel_size; float* ktmp = kernel_tm.channel(p/8 + (p%8)/4); for (int q=0; q<inch*kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } remain_outch_start += nn_outch << 2; for (int p=remain_outch_start; p<outch; p++) { const float* k0 = kernel + (p+0)*inch*kernel_size; float* ktmp = kernel_tm.channel(p/8 + (p%8)/4 + p%4); for (int q=0; q<inch*kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } static void conv_im2col_sgemm_sse(const Mat &bottom_blob, Mat &top_blob, const Mat & kernel_tm, const Mat& _bias, \ const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; // im2col Mat bottom_im2col(outw*outh, kernel_h*kernel_w*inch, elemsize, opt.workspace_allocator); { const int stride = kernel_h*kernel_w*outw*outh; float* ret = (float*)bottom_im2col; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<inch; p++) { const float* input = bottom_blob.channel(p); int retID = stride * p; for (int u=0; u<kernel_h; u++) { for (int v=0; v<kernel_w; v++) { for (int i=0; i<outh; i++) { for (int j=0; j<outw; j++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; // bottom_im2col memory packed 8 x 8 Mat bottom_tm(8*kernel_size, inch, out_size/8 + out_size%8, elemsize, opt.workspace_allocator); { int nn_size = out_size >> 3; int remain_size_start = nn_size << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = ii * 8; 
const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i/8); for (int q=0; q<inch*kernel_size; q++) { #if __AVX__ _mm256_storeu_ps(tmpptr, _mm256_loadu_ps(img0)); #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; #endif // __SSE__ tmpptr += 8; img0 += out_size; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<out_size; i++) { const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i/8 + i%8); for (int q=0; q<inch*kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += out_size; } } } // sgemm(int M, int N, int L, float* A, float* B, float* C) { //int M = outch; // outch int N = outw * outh; // outsize or out stride int L = kernel_w * kernel_h * inch; // ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int i = pp * 8; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i+1); float* output2 = top_blob.channel(i+2); float* output3 = top_blob.channel(i+3); float* output4 = top_blob.channel(i+4); float* output5 = top_blob.channel(i+5); float* output6 = top_blob.channel(i+6); float* output7 = top_blob.channel(i+7); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + i : zeros; int j=0; for (; j+7<N; j=j+8) { const float* vb = bottom_tm.channel(j/8); const float* va = kernel_tm.channel(i/8); #if __AVX__ __m256 _sum0 = _mm256_broadcast_ss(biasptr); __m256 _sum1 = _mm256_broadcast_ss(biasptr+1); __m256 _sum2 = _mm256_broadcast_ss(biasptr+2); __m256 _sum3 = _mm256_broadcast_ss(biasptr+3); __m256 _sum4 = _mm256_broadcast_ss(biasptr+4); __m256 _sum5 = _mm256_broadcast_ss(biasptr+5); __m256 _sum6 = _mm256_broadcast_ss(biasptr+6); __m256 _sum7 = _mm256_broadcast_ss(biasptr+7); int k=0; for (; k+3<L; k=k+4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va+1); __m256 _va2 = _mm256_broadcast_ss(va+2); __m256 _va3 = _mm256_broadcast_ss(va+3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb+8); __m256 _vb2 = _mm256_loadu_ps(vb+16); __m256 _vb3 = _mm256_loadu_ps(vb+24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 _va0 = _mm256_broadcast_ss(va+4); _va1 = _mm256_broadcast_ss(va+5); _va2 = _mm256_broadcast_ss(va+6); _va3 = _mm256_broadcast_ss(va+7); _sum4 = _mm256_fmadd_ps(_vb0, _va0, _sum4); // sum4 = (a00-a07) * k40 _sum5 = _mm256_fmadd_ps(_vb0, _va1, _sum5); // sum5 = (a00-a07) * k50 _sum6 = _mm256_fmadd_ps(_vb0, _va2, _sum6); // sum6 = (a00-a07) * k60 _sum7 = _mm256_fmadd_ps(_vb0, _va3, _sum7); // sum7 = (a00-a07) * k70 va += 8; // k1 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va+1); _va2 = _mm256_broadcast_ss(va+2); _va3 = _mm256_broadcast_ss(va+3); _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01 _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11 _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21 _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31 
_va0 = _mm256_broadcast_ss(va+4); _va1 = _mm256_broadcast_ss(va+5); _va2 = _mm256_broadcast_ss(va+6); _va3 = _mm256_broadcast_ss(va+7); _sum4 = _mm256_fmadd_ps(_vb1, _va0, _sum4); // sum4 += (a10-a17) * k41 _sum5 = _mm256_fmadd_ps(_vb1, _va1, _sum5); // sum5 += (a10-a17) * k51 _sum6 = _mm256_fmadd_ps(_vb1, _va2, _sum6); // sum6 += (a10-a17) * k61 _sum7 = _mm256_fmadd_ps(_vb1, _va3, _sum7); // sum7 += (a10-a17) * k71 va += 8; // k2 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va+1); _va2 = _mm256_broadcast_ss(va+2); _va3 = _mm256_broadcast_ss(va+3); _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02 _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12 _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22 _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32 _va0 = _mm256_broadcast_ss(va+4); _va1 = _mm256_broadcast_ss(va+5); _va2 = _mm256_broadcast_ss(va+6); _va3 = _mm256_broadcast_ss(va+7); _sum4 = _mm256_fmadd_ps(_vb2, _va0, _sum4); // sum4 += (a20-a27) * k42 _sum5 = _mm256_fmadd_ps(_vb2, _va1, _sum5); // sum5 += (a20-a27) * k52 _sum6 = _mm256_fmadd_ps(_vb2, _va2, _sum6); // sum6 += (a20-a27) * k62 _sum7 = _mm256_fmadd_ps(_vb2, _va3, _sum7); // sum7 += (a20-a27) * k72 va += 8; // k3 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va+1); _va2 = _mm256_broadcast_ss(va+2); _va3 = _mm256_broadcast_ss(va+3); _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03 _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13 _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23 _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33 _va0 = _mm256_broadcast_ss(va+4); _va1 = _mm256_broadcast_ss(va+5); _va2 = _mm256_broadcast_ss(va+6); _va3 = _mm256_broadcast_ss(va+7); _sum4 = _mm256_fmadd_ps(_vb3, _va0, _sum4); // sum4 += (a30-a37) * k43 _sum5 = _mm256_fmadd_ps(_vb3, _va1, _sum5); // sum5 += (a30-a37) * k53 _sum6 = 
_mm256_fmadd_ps(_vb3, _va2, _sum6); // sum6 += (a30-a37) * k63 _sum7 = _mm256_fmadd_ps(_vb3, _va3, _sum7); // sum7 += (a30-a37) * k73 va += 8; vb += 32; } for (; k<L; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va+1); __m256 _va2 = _mm256_broadcast_ss(va+2); __m256 _va3 = _mm256_broadcast_ss(va+3); __m256 _va4 = _mm256_broadcast_ss(va+4); __m256 _va5 = _mm256_broadcast_ss(va+5); __m256 _va6 = _mm256_broadcast_ss(va+6); __m256 _va7 = _mm256_broadcast_ss(va+7); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 _sum4 = _mm256_fmadd_ps(_vb0, _va4, _sum4); // sum4 = (a00-a07) * k40 _sum5 = _mm256_fmadd_ps(_vb0, _va5, _sum5); // sum5 = (a00-a07) * k50 _sum6 = _mm256_fmadd_ps(_vb0, _va6, _sum6); // sum6 = (a00-a07) * k60 _sum7 = _mm256_fmadd_ps(_vb0, _va7, _sum7); // sum7 = (a00-a07) * k70 va += 8; vb += 8; } _mm256_storeu_ps(output0, _sum0); _mm256_storeu_ps(output1, _sum1); _mm256_storeu_ps(output2, _sum2); _mm256_storeu_ps(output3, _sum3); _mm256_storeu_ps(output4, _sum4); _mm256_storeu_ps(output5, _sum5); _mm256_storeu_ps(output6, _sum6); _mm256_storeu_ps(output7, _sum7); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; float sum4[8] = {0}; float sum5[8] = {0}; float sum6[8] = {0}; float sum7[8] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; va += 8; sum0[n] += va[0] * vb[n+8]; sum1[n] += va[1] * vb[n+8]; sum2[n] += va[2] * vb[n+8]; sum3[n] += va[3] * vb[n+8]; sum4[n] += va[4] * vb[n+8]; sum5[n] += va[5] 
* vb[n+8]; sum6[n] += va[6] * vb[n+8]; sum7[n] += va[7] * vb[n+8]; va += 8; sum0[n] += va[0] * vb[n+16]; sum1[n] += va[1] * vb[n+16]; sum2[n] += va[2] * vb[n+16]; sum3[n] += va[3] * vb[n+16]; sum4[n] += va[4] * vb[n+16]; sum5[n] += va[5] * vb[n+16]; sum6[n] += va[6] * vb[n+16]; sum7[n] += va[7] * vb[n+16]; va += 8; sum0[n] += va[0] * vb[n+24]; sum1[n] += va[1] * vb[n+24]; sum2[n] += va[2] * vb[n+24]; sum3[n] += va[3] * vb[n+24]; sum4[n] += va[4] * vb[n+24]; sum5[n] += va[5] * vb[n+24]; sum6[n] += va[6] * vb[n+24]; sum7[n] += va[7] * vb[n+24]; va += 8; sum0[n] += va[0] * vb[n+32]; sum1[n] += va[1] * vb[n+32]; sum2[n] += va[2] * vb[n+32]; sum3[n] += va[3] * vb[n+32]; sum4[n] += va[4] * vb[n+32]; sum5[n] += va[5] * vb[n+32]; sum6[n] += va[6] * vb[n+32]; sum7[n] += va[7] * vb[n+32]; va += 8; sum0[n] += va[0] * vb[n+40]; sum1[n] += va[1] * vb[n+40]; sum2[n] += va[2] * vb[n+40]; sum3[n] += va[3] * vb[n+40]; sum4[n] += va[4] * vb[n+40]; sum5[n] += va[5] * vb[n+40]; sum6[n] += va[6] * vb[n+40]; sum7[n] += va[7] * vb[n+40]; va += 8; sum0[n] += va[0] * vb[n+48]; sum1[n] += va[1] * vb[n+48]; sum2[n] += va[2] * vb[n+48]; sum3[n] += va[3] * vb[n+48]; sum4[n] += va[4] * vb[n+48]; sum5[n] += va[5] * vb[n+48]; sum6[n] += va[6] * vb[n+48]; sum7[n] += va[7] * vb[n+48]; va += 8; sum0[n] += va[0] * vb[n+56]; sum1[n] += va[1] * vb[n+56]; sum2[n] += va[2] * vb[n+56]; sum3[n] += va[3] * vb[n+56]; sum4[n] += va[4] * vb[n+56]; sum5[n] += va[5] * vb[n+56]; sum6[n] += va[6] * vb[n+56]; sum7[n] += va[7] * vb[n+56]; va -= 56; } va += 64; vb += 64; } for (; k<L; k++) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n=0; n<8; n++) { output0[n] = sum0[n] + biasptr[0]; output1[n] = sum1[n] + biasptr[1]; output2[n] = sum2[n] + biasptr[2]; output3[n] = sum3[n] + biasptr[3]; output4[n] 
= sum4[n] + biasptr[4]; output5[n] = sum5[n] + biasptr[5]; output6[n] = sum6[n] + biasptr[6]; output7[n] = sum7[n] + biasptr[7]; } #endif // __AVX__ output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j<N; j++) { const float* vb = bottom_tm.channel(j/8 + j%8); const float* va = kernel_tm.channel(i/8); #if __AVX__ __m256 _sum0_7 = _mm256_loadu_ps(biasptr); __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = _mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); int k=0; for (; k+3<L; k=k+4) { __m256 _vb0 = _mm256_broadcast_ss(vb); __m256 _vb1 = _mm256_broadcast_ss(vb+1); __m256 _vb2 = _mm256_broadcast_ss(vb+2); __m256 _vb3 = _mm256_broadcast_ss(vb+3); __m256 _va0 = _mm256_loadu_ps(va); __m256 _va1 = _mm256_loadu_ps(va+8); __m256 _va2 = _mm256_loadu_ps(va+16); __m256 _va3 = _mm256_loadu_ps(va+24); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);// sum0 += (k00-k70) * a00 _sum1 = _mm256_fmadd_ps(_va1, _vb1, _sum1);// sum1 += (k01-k71) * a10 _sum2 = _mm256_fmadd_ps(_va2, _vb2, _sum2);// sum2 += (k02-k72) * a20 _sum3 = _mm256_fmadd_ps(_va3, _vb3, _sum3);// sum3 += (k03-k73) * a30 va += 32; vb += 4; } _sum0 = _mm256_add_ps(_sum0, _sum1); _sum2 = _mm256_add_ps(_sum2, _sum3); _sum0_7 = _mm256_add_ps(_sum0_7, _sum0); _sum0_7 = _mm256_add_ps(_sum0_7, _sum2); for (; k<L; k++) { __m256 _vb0 = _mm256_broadcast_ss(vb); __m256 _va = _mm256_loadu_ps(va); _sum0_7 = _mm256_fmadd_ps(_va, _vb0, _sum0_7);// sum0 += (k00-k70) * a00 va += 8; vb += 1; } float output_sum0_7[8] = {0.f}; _mm256_storeu_ps(output_sum0_7, _sum0_7); output0[0] = output_sum0_7[0]; output1[0] = output_sum0_7[1]; output2[0] = output_sum0_7[2]; output3[0] = output_sum0_7[3]; output4[0] = output_sum0_7[4]; output5[0] = output_sum0_7[5]; output6[0] = output_sum0_7[6]; output7[0] = output_sum0_7[7]; #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; float sum4 = 
biasptr[4]; float sum5 = biasptr[5]; float sum6 = biasptr[6]; float sum7 = biasptr[7]; for (int k=0; k<L; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif // __AVX__ output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; } } nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int i = remain_outch_start + pp * 4; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i+1); float* output2 = top_blob.channel(i+2); float* output3 = top_blob.channel(i+3); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + i : zeros; int j=0; for (; j+7<N; j=j+8) { const float* vb = bottom_tm.channel(j/8); const float* va = kernel_tm.channel(i/8 + (i%8)/4); #if __AVX__ __m256 _sum0 = _mm256_broadcast_ss(biasptr); __m256 _sum1 = _mm256_broadcast_ss(biasptr+1); __m256 _sum2 = _mm256_broadcast_ss(biasptr+2); __m256 _sum3 = _mm256_broadcast_ss(biasptr+3); int k=0; for (; k+3<L; k=k+4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va+1); __m256 _va2 = _mm256_broadcast_ss(va+2); __m256 _va3 = _mm256_broadcast_ss(va+3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb+8); __m256 _vb2 = _mm256_loadu_ps(vb+16); __m256 _vb3 = _mm256_loadu_ps(vb+24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 va += 4; // k1 _va0 = 
_mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va+1); _va2 = _mm256_broadcast_ss(va+2); _va3 = _mm256_broadcast_ss(va+3); _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01 _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11 _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21 _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31 va += 4; // k2 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va+1); _va2 = _mm256_broadcast_ss(va+2); _va3 = _mm256_broadcast_ss(va+3); _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02 _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12 _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22 _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32 va += 4; // k3 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va+1); _va2 = _mm256_broadcast_ss(va+2); _va3 = _mm256_broadcast_ss(va+3); _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03 _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13 _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23 _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33 va += 4; vb += 32; } for (; k<L; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va+1); __m256 _va2 = _mm256_broadcast_ss(va+2); __m256 _va3 = _mm256_broadcast_ss(va+3); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 va += 4; vb += 8; } _mm256_storeu_ps(output0, _sum0); _mm256_storeu_ps(output1, _sum1); _mm256_storeu_ps(output2, _sum2); _mm256_storeu_ps(output3, _sum3); #else float sum0[8] = {0}; float sum1[8] = 
{0}; float sum2[8] = {0}; float sum3[8] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; va += 4; sum0[n] += va[0] * vb[n+8]; sum1[n] += va[1] * vb[n+8]; sum2[n] += va[2] * vb[n+8]; sum3[n] += va[3] * vb[n+8]; va += 4; sum0[n] += va[0] * vb[n+16]; sum1[n] += va[1] * vb[n+16]; sum2[n] += va[2] * vb[n+16]; sum3[n] += va[3] * vb[n+16]; va += 4; sum0[n] += va[0] * vb[n+24]; sum1[n] += va[1] * vb[n+24]; sum2[n] += va[2] * vb[n+24]; sum3[n] += va[3] * vb[n+24]; va += 4; sum0[n] += va[0] * vb[n+32]; sum1[n] += va[1] * vb[n+32]; sum2[n] += va[2] * vb[n+32]; sum3[n] += va[3] * vb[n+32]; va += 4; sum0[n] += va[0] * vb[n+40]; sum1[n] += va[1] * vb[n+40]; sum2[n] += va[2] * vb[n+40]; sum3[n] += va[3] * vb[n+40]; va += 4; sum0[n] += va[0] * vb[n+48]; sum1[n] += va[1] * vb[n+48]; sum2[n] += va[2] * vb[n+48]; sum3[n] += va[3] * vb[n+48]; va += 4; sum0[n] += va[0] * vb[n+56]; sum1[n] += va[1] * vb[n+56]; sum2[n] += va[2] * vb[n+56]; sum3[n] += va[3] * vb[n+56]; va -= 28; } va += 32; vb += 64; } for (; k<L; k++) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n=0; n<8; n++) { output0[n] = sum0[n] + biasptr[0]; output1[n] = sum1[n] + biasptr[1]; output2[n] = sum2[n] + biasptr[2]; output3[n] = sum3[n] + biasptr[3]; } #endif // __AVX__ output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j<N; j++) { const float* vb = bottom_tm.channel(j/8 + j%8); const float* va = kernel_tm.channel(i/8 + (i%8)/4); #if __AVX__ __m128 _sum0_3 = _mm_loadu_ps(biasptr); __m128 _sum0 = _mm_set1_ps(0.0); __m128 _sum1 = _mm_set1_ps(0.0); __m128 _sum2 = _mm_set1_ps(0.0); __m128 _sum3 = _mm_set1_ps(0.0); int k=0; for (; k+3<L; k=k+4) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _vb1 = _mm_set1_ps(vb[1]); __m128 _vb2 = _mm_set1_ps(vb[2]); __m128 _vb3 = _mm_set1_ps(vb[3]); 
__m128 _va0 = _mm_loadu_ps(va); __m128 _va1 = _mm_loadu_ps(va+4); __m128 _va2 = _mm_loadu_ps(va+8); __m128 _va3 = _mm_loadu_ps(va+12); _sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0);// sum0 += (k00-k30) * a00 _sum1 = _mm_fmadd_ps(_va1, _vb1, _sum1);// sum1 += (k01-k31) * a10 _sum2 = _mm_fmadd_ps(_va2, _vb2, _sum2);// sum2 += (k02-k32) * a20 _sum3 = _mm_fmadd_ps(_va3, _vb3, _sum3);// sum3 += (k03-k33) * a30 va += 16; vb += 4; } _sum0 = _mm_add_ps(_sum0, _sum1); _sum2 = _mm_add_ps(_sum2, _sum3); _sum0_3 = _mm_add_ps(_sum0_3, _sum0); _sum0_3 = _mm_add_ps(_sum0_3, _sum2); for (; k<L; k++) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _va = _mm_loadu_ps(va); _sum0_3 = _mm_fmadd_ps(_va, _vb0, _sum0_3);// sum0 += (k00-k30) * a00 va += 4; vb += 1; } float output_sum0_3[4] = {0.f}; _mm_storeu_ps(output_sum0_3, _sum0_3); output0[0] = output_sum0_3[0]; output1[0] = output_sum0_3[1]; output2[0] = output_sum0_3[2]; output3[0] = output_sum0_3[3]; #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; for (int k=0; k<L; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif // __AVX__ output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_outch_start; i<outch; i++) { float* output = top_blob.channel(i); const float bias0 = bias ? 
bias[i] : 0.f; int j=0; for (; j+7<N; j=j+8) { const float* vb = bottom_tm.channel(j/8); const float* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4); #if __AVX__ __m256 _sum0 = _mm256_broadcast_ss(&bias0); int k=0; for (; k+3<L; k=k+4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va+1); __m256 _va2 = _mm256_broadcast_ss(va+2); __m256 _va3 = _mm256_broadcast_ss(va+3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb+8); __m256 _vb2 = _mm256_loadu_ps(vb+16); __m256 _vb3 = _mm256_loadu_ps(vb+24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum0 = _mm256_fmadd_ps(_vb1, _va1, _sum0); // sum0 += (a10-a17) * k01 _sum0 = _mm256_fmadd_ps(_vb2, _va2, _sum0); // sum0 += (a20-a27) * k02 _sum0 = _mm256_fmadd_ps(_vb3, _va3, _sum0); // sum0 += (a30-a37) * k03 va += 4; vb += 32; } for (; k<L; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 va += 1; vb += 8; } _mm256_storeu_ps(output, _sum0); #else float sum[8] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<8; n++) { sum[n] += va[0] * vb[n]; sum[n] += va[1] * vb[n+8]; sum[n] += va[2] * vb[n+16]; sum[n] += va[3] * vb[n+24]; sum[n] += va[4] * vb[n+32]; sum[n] += va[5] * vb[n+40]; sum[n] += va[6] * vb[n+48]; sum[n] += va[7] * vb[n+56]; } va += 8; vb += 64; } for (; k<L; k++) { for (int n=0; n<8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n=0; n<8; n++) { output[n] = sum[n] + bias0; } #endif // __AVX__ output += 8; } for (; j<N; j++) { const float* vb = bottom_tm.channel(j/8 + j%8); const float* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4); int k=0; #if __AVX__ __m128 _sum0 = _mm_set1_ps(0.f); for (; k+3<L; k+=4) { __m128 _p0 = _mm_loadu_ps(vb); vb += 4; __m128 _k0 = _mm_loadu_ps(va); va += 4; _sum0 = _mm_fmadd_ps(_p0, _k0, _sum0); } float output_sum0[4] = {0.f}; _mm_storeu_ps(output_sum0, _sum0); float sum0 = bias0 + 
output_sum0[0] + output_sum0[1] + output_sum0[2] + output_sum0[3];
#else
            float sum0 = bias0;
#endif // __AVX__
            for (; k<L; k++)
            {
                sum0 += va[0] * vb[0];

                va += 1;
                vb += 1;
            }
            output[0] = sum0;

            output++;
        }
    }
}
// NOTE(review): this #else pairs with a file-level conditional that begins
// before this excerpt (presumably the AVX vs. plain-SSE implementation split)
// — confirm against the full file.
#else

// Repack convolution weights for the im2col + sgemm path (SSE variant).
// src layout: kw-kh-inch-outch.  Output channels are processed in groups of 4:
// each packed channel interleaves one weight from each of the 4 output
// channels (4 x kernel_size x inch), so the sgemm inner loop can broadcast
// 4 kernel values at once.  The outch%4 tail is stored unpacked, one output
// channel per channel slot after the packed groups.
static void conv_im2col_sgemm_transform_kernel_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size)
{
    const float* kernel = _kernel;

    // kernel memory packed 4 x 4
    kernel_tm.create(4*kernel_size, inch, outch/4 + outch%4);

    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = outch >> 2;
    remain_outch_start = nn_outch << 2;

    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 4;

        const float* k0 = kernel + (p+0)*inch*kernel_size;
        const float* k1 = kernel + (p+1)*inch*kernel_size;
        const float* k2 = kernel + (p+2)*inch*kernel_size;
        const float* k3 = kernel + (p+3)*inch*kernel_size;

        float* ktmp = kernel_tm.channel(p/4);

        for (int q=0; q<inch*kernel_size; q++)
        {
            // interleave one weight from each of the 4 output channels
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }

    // remaining output channels (outch % 4) are stored unpacked
    for (int p=remain_outch_start; p<outch; p++)
    {
        const float* k0 = kernel + (p+0)*inch*kernel_size;

        float* ktmp = kernel_tm.channel(p/4 + p%4);

        for (int q=0; q<inch*kernel_size; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}

// im2col + sgemm convolution (SSE variant).
// Stages:
//   1. im2col: unfold the input so each output pixel's receptive field becomes
//      a column (rows ordered by kernel position u,v then input channel).
//   2. Pack columns 4-at-a-time into bottom_tm (4-column tail stored unpacked),
//      mirroring the kernel packing produced by the transform function above.
//   3. sgemm: register-blocked 4-output-channel x 4-column product,
//      parallelized over output-channel groups with OpenMP.
// kernel_tm must come from conv_im2col_sgemm_transform_kernel_sse.
static void conv_im2col_sgemm_sse(const Mat &bottom_blob, Mat &top_blob, const Mat & kernel_tm, const Mat& _bias, \
            const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    size_t elemsize = bottom_blob.elemsize;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* bias = _bias;

    // im2col
    Mat bottom_im2col(outw*outh, kernel_h*kernel_w*inch, elemsize, opt.workspace_allocator);
    {
        const int stride = kernel_h*kernel_w*outw*outh;
        float* ret = (float*)bottom_im2col;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=0; p<inch; p++)
        {
            const float* input = bottom_blob.channel(p);
            int retID = stride * p;
            // one row of bottom_im2col per (kernel position, input channel) pair
            for (int u=0; u<kernel_h; u++)
            {
                for (int v=0; v<kernel_w; v++)
                {
                    for (int i=0; i<outh; i++)
                    {
                        for (int j=0; j<outw; j++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;
                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;

    // bottom_im2col memory packed 4 x 4
    Mat bottom_tm(4*kernel_size, inch, out_size/4 + out_size%4, elemsize, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 4;

            const float* img0 = bottom_im2col.channel(0);
            img0 += i;

            float* tmpptr = bottom_tm.channel(i/4);

            for (int q=0; q<inch*kernel_size; q++)
            {
#if __SSE__
                _mm_storeu_ps(tmpptr, _mm_loadu_ps(img0));
#else
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img0[2];
                tmpptr[3] = img0[3];
#endif // __SSE__
                tmpptr += 4;
                img0 += out_size;
            }
        }

        // out_size % 4 tail columns stored unpacked
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_size_start; i<out_size; i++)
        {
            const float* img0 = bottom_im2col.channel(0);
            img0 += i;

            float* tmpptr = bottom_tm.channel(i/4 + i%4);

            for (int q=0; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += out_size;
            }
        }
    }

    // sgemm(int M, int N, int L, float* A, float* B, float* C)
    {
        //int M = outch;                    // outch
        int N = outw * outh;                // outsize or out stride
        int L = kernel_w * kernel_h * inch; // ksize * inch

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        // 4 output channels at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int i = pp * 4;

            float* output0 = top_blob.channel(i);
            float* output1 = top_blob.channel(i+1);
            float* output2 = top_blob.channel(i+2);
            float* output3 = top_blob.channel(i+3);

            const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
            const float* biasptr = bias ? bias + i : zeros;

            int j=0;
            // 4 output pixels x 4 output channels per iteration
            for (; j+3<N; j=j+4)
            {
                const float* vb = bottom_tm.channel(j/4);
                const float* va = kernel_tm.channel(i/4);

#if __SSE__
                __m128 _sum0 = _mm_set1_ps(biasptr[0]);
                __m128 _sum1 = _mm_set1_ps(biasptr[1]);
                __m128 _sum2 = _mm_set1_ps(biasptr[2]);
                __m128 _sum3 = _mm_set1_ps(biasptr[3]);

                int k=0;
                // unrolled by 4 along L; va holds interleaved kernel values
                for (; k+3<L; k=k+4)
                {
                    // k0
                    __m128 _vb = _mm_loadu_ps(vb);
                    __m128 _va0 = _mm_set1_ps(va[0]);
                    __m128 _va1 = _mm_set1_ps(va[1]);
                    __m128 _va2 = _mm_set1_ps(va[2]);
                    __m128 _va3 = _mm_set1_ps(va[3]);

                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a00-a03) * k00
                    _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a00-a03) * k10
                    _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a00-a03) * k20
                    _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a00-a03) * k30

                    // k1
                    _vb = _mm_loadu_ps(vb+4);
                    _va0 = _mm_set1_ps(va[4]);
                    _va1 = _mm_set1_ps(va[5]);
                    _va2 = _mm_set1_ps(va[6]);
                    _va3 = _mm_set1_ps(va[7]);

                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a10-a13) * k01
                    _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a10-a13) * k11
                    _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a10-a13) * k21
                    _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a10-a13) * k31

                    // k2
                    _vb = _mm_loadu_ps(vb+8);
                    _va0 = _mm_set1_ps(va[8]);
                    _va1 = _mm_set1_ps(va[9]);
                    _va2 = _mm_set1_ps(va[10]);
                    _va3 = _mm_set1_ps(va[11]);

                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a20-a23) * k02
                    _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a20-a23) * k12
                    _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a20-a23) * k22
                    _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a20-a23) * k32

                    // k3
                    _vb = _mm_loadu_ps(vb+12);
                    _va0 = _mm_set1_ps(va[12]);
                    _va1 = _mm_set1_ps(va[13]);
                    _va2 = _mm_set1_ps(va[14]);
                    _va3 = _mm_set1_ps(va[15]);

                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a30-a33) * k03
                    _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a30-a33) * k13
                    _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a30-a33) * k23
                    _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a30-a33) * k33

                    va += 16;
                    vb += 16;
                }

                // L % 4 tail
                for (; k<L; k++)
                {
                    // k0
                    __m128 _vb = _mm_loadu_ps(vb);
                    __m128 _va0 = _mm_set1_ps(va[0]);
                    __m128 _va1 = _mm_set1_ps(va[1]);
                    __m128 _va2 = _mm_set1_ps(va[2]);
                    __m128 _va3 = _mm_set1_ps(va[3]);

                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a00-a03) * k00
                    _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a00-a03) * k10
                    _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a00-a03) * k20
                    _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a00-a03) * k30

                    va += 4;
                    vb += 4;
                }

                _mm_storeu_ps(output0, _sum0);
                _mm_storeu_ps(output1, _sum1);
                _mm_storeu_ps(output2, _sum2);
                _mm_storeu_ps(output3, _sum3);
#else
                // scalar fallback, unrolled by 8 along L
                float sum0[4] = {0};
                float sum1[4] = {0};
                float sum2[4] = {0};
                float sum3[4] = {0};

                int k=0;
                for (; k+7<L; k=k+8)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += va[0] * vb[n];
                        sum1[n] += va[1] * vb[n];
                        sum2[n] += va[2] * vb[n];
                        sum3[n] += va[3] * vb[n];
                        va += 4;

                        sum0[n] += va[0] * vb[n+4];
                        sum1[n] += va[1] * vb[n+4];
                        sum2[n] += va[2] * vb[n+4];
                        sum3[n] += va[3] * vb[n+4];
                        va += 4;

                        sum0[n] += va[0] * vb[n+8];
                        sum1[n] += va[1] * vb[n+8];
                        sum2[n] += va[2] * vb[n+8];
                        sum3[n] += va[3] * vb[n+8];
                        va += 4;

                        sum0[n] += va[0] * vb[n+12];
                        sum1[n] += va[1] * vb[n+12];
                        sum2[n] += va[2] * vb[n+12];
                        sum3[n] += va[3] * vb[n+12];
                        va += 4;

                        sum0[n] += va[0] * vb[n+16];
                        sum1[n] += va[1] * vb[n+16];
                        sum2[n] += va[2] * vb[n+16];
                        sum3[n] += va[3] * vb[n+16];
                        va += 4;

                        sum0[n] += va[0] * vb[n+20];
                        sum1[n] += va[1] * vb[n+20];
                        sum2[n] += va[2] * vb[n+20];
                        sum3[n] += va[3] * vb[n+20];
                        va += 4;

                        sum0[n] += va[0] * vb[n+24];
                        sum1[n] += va[1] * vb[n+24];
                        sum2[n] += va[2] * vb[n+24];
                        sum3[n] += va[3] * vb[n+24];
                        va += 4;

                        sum0[n] += va[0] * vb[n+28];
                        sum1[n] += va[1] * vb[n+28];
                        sum2[n] += va[2] * vb[n+28];
                        sum3[n] += va[3] * vb[n+28];
                        // rewind: net va advance per n-iteration is zero
                        va -= 28;
                    }

                    va += 32;
                    vb += 32;
                }

                for (; k<L; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += va[0] * vb[n];
                        sum1[n] += va[1] * vb[n];
                        sum2[n] += va[2] * vb[n];
                        sum3[n] += va[3] * vb[n];
                    }
                    va += 4;
                    vb += 4;
                }

                for (int n=0; n<4; n++)
                {
                    output0[n] = sum0[n] + biasptr[0];
                    output1[n] = sum1[n] + biasptr[1];
                    output2[n] = sum2[n] + biasptr[2];
                    output3[n] = sum3[n] + biasptr[3];
                }
#endif // __SSE__
                output0 += 4;
                output1 += 4;
                output2 += 4;
                output3 += 4;
            }

            // remaining N % 4 output pixels, still 4 output channels wide
            for (; j<N; j++)
            {
                const float* vb = bottom_tm.channel(j/4 + j%4);
                const float* va = kernel_tm.channel(i/4);

#if __SSE__
                __m128 _sum0_3 = _mm_loadu_ps(biasptr);
                __m128 _sum0 = _mm_set1_ps(0.0);
                __m128 _sum1 = _mm_set1_ps(0.0);
                __m128 _sum2 = _mm_set1_ps(0.0);
                __m128 _sum3 = _mm_set1_ps(0.0);

                int k=0;
                for (; k+3<L; k=k+4)
                {
                    __m128 _vb0 = _mm_set1_ps(vb[0]);
                    __m128 _vb1 = _mm_set1_ps(vb[1]);
                    __m128 _vb2 = _mm_set1_ps(vb[2]);
                    __m128 _vb3 = _mm_set1_ps(vb[3]);
                    __m128 _va0 = _mm_loadu_ps(va);
                    __m128 _va1 = _mm_loadu_ps(va+4);
                    __m128 _va2 = _mm_loadu_ps(va+8);
                    __m128 _va3 = _mm_loadu_ps(va+12);

                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));// sum0 += (k00-k30) * a00
                    _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va1, _vb1));// sum1 += (k01-k31) * a10
                    _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va2, _vb2));// sum2 += (k02-k32) * a20
                    _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va3, _vb3));// sum3 += (k03-k33) * a30

                    va += 16;
                    vb += 4;
                }

                // collapse the four partial accumulators onto the bias vector
                _sum0 = _mm_add_ps(_sum0, _sum1);
                _sum2 = _mm_add_ps(_sum2, _sum3);
                _sum0_3 = _mm_add_ps(_sum0_3, _sum0);
                _sum0_3 = _mm_add_ps(_sum0_3, _sum2);

                for (; k<L; k++)
                {
                    __m128 _vb0 = _mm_set1_ps(vb[0]);
                    __m128 _va = _mm_loadu_ps(va);

                    _sum0_3 = _mm_add_ps(_sum0_3, _mm_mul_ps(_va, _vb0));// sum0 += (k00-k30) * a00

                    va += 4;
                    vb += 1;
                }

                output0[0] = _sum0_3[0];
                output1[0] = _sum0_3[1];
                output2[0] = _sum0_3[2];
                output3[0] = _sum0_3[3];
#else
                float sum0 = biasptr[0];
                float sum1 = biasptr[1];
                float sum2 = biasptr[2];
                float sum3 = biasptr[3];

                for (int k=0; k<L; k++)
                {
                    sum0 += va[0] * vb[0];
                    sum1 += va[1] * vb[0];
                    sum2 += va[2] * vb[0];
                    sum3 += va[3] * vb[0];

                    va += 4;
                    vb += 1;
                }

                output0[0] = sum0;
                output1[0] = sum1;
                output2[0] = sum2;
                output3[0] = sum3;
#endif // __SSE__
                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        // remaining outch % 4 output channels, one at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_outch_start; i<outch; i++)
        {
            float* output = top_blob.channel(i);

            const float bias0 = bias ? bias[i] : 0.f;

            int j=0;
            for (; j+3<N; j=j+4)
            {
                const float* vb = bottom_tm.channel(j/4);
                const float* va = kernel_tm.channel(i/4 + i%4);

#if __SSE__
                __m128 _sum0 = _mm_set1_ps(bias0);

                int k=0;
                for (; k+3<L; k=k+4)
                {
                    // k0
                    __m128 _va0 = _mm_set1_ps(va[0]);
                    __m128 _va1 = _mm_set1_ps(va[1]);
                    __m128 _va2 = _mm_set1_ps(va[2]);
                    __m128 _va3 = _mm_set1_ps(va[3]);
                    __m128 _vb0 = _mm_loadu_ps(vb);
                    __m128 _vb1 = _mm_loadu_ps(vb+4);
                    __m128 _vb2 = _mm_loadu_ps(vb+8);
                    __m128 _vb3 = _mm_loadu_ps(vb+12);

                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb0, _va0));// sum0 = (a00-a03) * k00
                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb1, _va1));// sum0 += (a10-a13) * k01
                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb2, _va2));// sum0 += (a20-a23) * k02
                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb3, _va3));// sum0 += (a30-a33) * k03

                    va += 4;
                    vb += 16;
                }

                for (; k<L; k++)
                {
                    // k0
                    __m128 _va0 = _mm_set1_ps(va[0]);
                    __m128 _vb0 = _mm_loadu_ps(vb);

                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb0, _va0)); // sum0 = (a00-a03) * k00

                    va += 1;
                    vb += 4;
                }

                _mm_storeu_ps(output, _sum0);
#else
                float sum[4] = {0};

                int k=0;
                for (; k+3<L; k=k+4)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += va[0] * vb[n];
                        sum[n] += va[1] * vb[n+4];
                        sum[n] += va[2] * vb[n+8];
                        sum[n] += va[3] * vb[n+12];
                        //sum[n] += va[4] * vb[n+16];
                        //sum[n] += va[5] * vb[n+20];
                        //sum[n] += va[6] * vb[n+24];
                        //sum[n] += va[7] * vb[n+28];
                    }
                    va += 4;
                    vb += 16;
                }

                for (; k<L; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += va[0] * vb[n];
                    }
                    va += 1;
                    vb += 4;
                }

                for (int n=0; n<4; n++)
                {
                    output[n] = sum[n] + bias0;
                }
#endif // __SSE__
                output += 4;
            }

            // N % 4 tail: one pixel, one channel -> horizontal dot product
            for (; j<N; j++)
            {
                const float* vb = bottom_tm.channel(j/4 + j%4);
                const float* va = kernel_tm.channel(i/4 + i%4);

                int k=0;
#if __SSE__
                __m128 _sum0 = _mm_set1_ps(0.f);

                for (; k+3<L; k+=4)
                {
                    __m128 _p0 = _mm_loadu_ps(vb);
                    __m128 _k0 = _mm_loadu_ps(va);
                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0));

                    va += 4;
                    vb += 4;
                }
                float sum0 = bias0 + _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3];
#else
                float sum0 = bias0;
#endif // __SSE__
                for (; k<L; k++)
                {
                    sum0 += va[0] * vb[0];

                    va += 1;
                    vb += 1;
                }
                output[0] = sum0;

                output++;
            }
        }
    }
}
#endif
convolution_pack4to1_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Repack convolution weights for the pack4-input -> pack1-output bf16 path,
// converting each float32 weight to bfloat16 on the way.
// src layout: kw-kh-inch-outch
// dst layout: 4a-kw-kh-inch/4a-outch (4 consecutive input channels interleaved
// per kernel tap, matching the pack4 activation layout read at inference time).
static void convolution_transform_kernel_pack4to1_bf16s_neon(const Mat& weight_data, Mat& weight_data_bf16, int num_input, int num_output, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // src = kw-kh-inch-outch
    // dst = 4a-kw-kh-inch/4a-outch
    Mat weight_data_r2 = weight_data.reshape(maxk, num_input, num_output);

    // elemsize 2 bytes (bf16) x elempack 4
    weight_data_bf16.create(maxk, num_input / 4, num_output, (size_t)2 * 4, 4);

    for (int q = 0; q < num_output; q++)
    {
        const Mat k0 = weight_data_r2.channel(q);
        Mat g0 = weight_data_bf16.channel(q);

        for (int p = 0; p + 3 < num_input; p += 4)
        {
            const float* k00 = k0.row(p);
            const float* k01 = k0.row(p + 1);
            const float* k02 = k0.row(p + 2);
            const float* k03 = k0.row(p + 3);

            unsigned short* g00 = g0.row<unsigned short>(p / 4);

            for (int k = 0; k < maxk; k++)
            {
                // interleave one bf16 weight from each of the 4 input channels
                g00[0] = float32_to_bfloat16(k00[k]);
                g00[1] = float32_to_bfloat16(k01[k]);
                g00[2] = float32_to_bfloat16(k02[k]);
                g00[3] = float32_to_bfloat16(k03[k]);
                g00 += 4;
            }
        }
    }
}

// Naive (non-winograd) convolution: pack4 bf16 input -> pack1 bf16 output.
// For every output element, accumulates over all input channels (4 lanes per
// Mat channel) and kernel taps in float32, applies the activation, then stores
// the result back as bfloat16.  weight_data_bf16 must come from
// convolution_transform_kernel_pack4to1_bf16s_neon above.
static void convolution_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_bf16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: flat offset (in pack4 elements) of each kernel tap
    // relative to the top-left tap, accounting for dilation
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        unsigned short* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float sum = 0.f;

                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                const unsigned short* kptr = weight_data_bf16.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    // x4: pack4 layout stores 4 input-channel lanes per element
                    const unsigned short* sptr = m.row<const unsigned short>(i * stride_h) + j * stride_w * 4;

                    for (int k = 0; k < maxk; k++)
                    {
                        // widen 4 bf16 lanes (activation and weight) to fp32
                        float32x4_t _val = vcvt_f32_bf16(vld1_u16(sptr + space_ofs[k] * 4));
                        float32x4_t _w = vcvt_f32_bf16(vld1_u16(kptr));
                        float32x4_t _s4 = vmulq_f32(_val, _w);
#if __aarch64__
                        sum += vaddvq_f32(_s4); // dot
#else
                        // horizontal add of the 4 products (no vaddvq on armv7)
                        float32x2_t _ss = vadd_f32(vget_low_f32(_s4), vget_high_f32(_s4));
                        _ss = vpadd_f32(_ss, _ss);
                        sum += vget_lane_f32(_ss, 0);
#endif
                        kptr += 4;
                    }
                }

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = float32_to_bfloat16(sum);
            }

            outptr += outw;
        }
    }
}
DRB035-truedepscalar-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* Loop carried true dep between tmp =.. and ..= tmp. Data race pair: tmp@66:12 vs. tmp@67:5 */
#include <stdlib.h>
#include <stdio.h>

/* DataRaceBench kernel DRB035 ("orig-yes"): this program INTENTIONALLY
 * contains a data race and serves as ground truth for race detectors.
 * Do NOT "fix" it — the loop-carried true dependence on the shared scalar
 * `tmp` (read in one iteration, written in another, with iterations spread
 * across threads by schedule(dynamic)) is the defect under test.
 * The @line:col positions in the comment above refer to the original
 * upstream file layout. */
int main(int argc, char* argv[])
{
  int i;
  int tmp;      /* shared scalar carrying the inter-iteration dependence */
  tmp = 10;
  int len=100;
  int a[100];

  /* `tmp` is deliberately neither privatized nor ordered: racy by design */
#pragma omp parallel for schedule(dynamic)
  for (i=0;i<len;i++)
  {
    a[i] = tmp;     /* read of shared tmp  (one half of the race pair) */
    tmp =a[i]+i;    /* write of shared tmp (other half of the race pair) */
  }

  printf("a[50]=%d\n", a[50]);
  return 0;
}
special_random_ops.h
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // #ifndef LIBND4J_SPECIAL_RANDOM_OPS_H #define LIBND4J_SPECIAL_RANDOM_OPS_H #include <ops/random_ops.h> #include <helpers/shape.h> namespace randomOps { ////////////////////////////////////////////////////////////////////// template<typename T> class Choice { public: method_idx method_X method_XY static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { /** * X holds data, * Y holds probabilities * Z will hold results */ // TODO: we probably might want to skip this sum, and state that probabilities array should be real probabilities, i.e. 
should sum to 1.0 //T probSum = extraArguments[0]; __shared__ Nd4jLong xLength; __shared__ Nd4jLong yLength; __shared__ Nd4jLong zLength; __shared__ Nd4jLong xEWS; __shared__ Nd4jLong yEWS; __shared__ Nd4jLong zEWS; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::random::RandomBuffer *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); xLength = shape::length(xShapeBuffer); yLength = shape::length(yShapeBuffer); zLength = shape::length(zShapeBuffer); xEWS = shape::elementWiseStride(xShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; if (zEWS >= 1 && xEWS >= 1 && yEWS >= 1) { for (Nd4jLong e = tid; e < zLength; e+=blockDim.x * gridDim.x) { T prob = buffer->relativeT<T>(e); T cumProb = (T) 0.0f; for (Nd4jLong f = 0; f < yLength; f++) { T relProb = y[f * yEWS]; cumProb += relProb; if (prob <= cumProb || f == yLength - 1) { z[e * zEWS] = x[f * xEWS]; f += yLength; } __syncthreads(); } __syncthreads(); } } else { Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; Nd4jLong zCoord[MAX_RANK]; __shared__ int xRank; __shared__ int yRank; __shared__ int zRank; __shared__ Nd4jLong *xShape; __shared__ Nd4jLong *yShape; __shared__ Nd4jLong *zShape; __shared__ Nd4jLong *xStride; __shared__ Nd4jLong *yStride; __shared__ Nd4jLong *zStride; if (threadIdx.x == 0) { xRank = shape::rank(xShapeBuffer); yRank = shape::rank(yShapeBuffer); zRank = shape::rank(zShapeBuffer); xShape = shape::shapeOf(xShapeBuffer); yShape = 
shape::shapeOf(yShapeBuffer); zShape = shape::shapeOf(zShapeBuffer); xStride = shape::stride(xShapeBuffer); yStride = shape::stride(yShapeBuffer); zStride = shape::stride(zShapeBuffer); } __syncthreads(); for (Nd4jLong i = tid; i < zLength; i+=blockDim.x * gridDim.x) { shape::ind2sub(zRank, zShape, i, zCoord); auto zOffset2 = shape::getOffset(0, zShape, zStride, zCoord, zRank); T prob = buffer->relativeT<T>(i); T cumProb = (T) 0.0f; for (Nd4jLong f = 0; f < yLength; f++) { shape::ind2sub(yRank, yShape, i, yCoord); auto yOffset2 = shape::getOffset(0, yShape, yStride, yCoord, yRank); T relProb = y[yOffset2]; cumProb += relProb; if (prob <= cumProb || f == yLength - 1) { shape::ind2sub(xRank, xShape, f, xCoord); auto xOffset2 = shape::getOffset(0, xShape, xStride, xCoord, xRank); z[zOffset2] = x[xOffset2]; f += yLength; } __syncthreads(); } __syncthreads(); } } __syncthreads(); devBuffer->rewind(zLength); } #endif static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { /** * X holds data, * Y holds probabilities * Z will hold results */ nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); // TODO: we probably might want to skip this sum, and state that probabilities array should be real probabilities, i.e. 
should sum to 1.0 //T probSum = extraArguments[0]; Nd4jLong yLength = shape::length(yShapeBuffer); Nd4jLong zLength = shape::length(zShapeBuffer); auto xEWS = shape::elementWiseStride(xShapeBuffer); auto yEWS = shape::elementWiseStride(yShapeBuffer); auto zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); if (zEWS >= 1 && xEWS >= 1 && yEWS >= 1) { #pragma omp parallel for num_threads(_threads) if (_threads > 1) schedule(guided) for (Nd4jLong e = 0; e < zLength; e++) { T prob = buffer->relativeT<T>(e); T cumProb = (T) 0.0f; for (Nd4jLong f = 0; f < yLength; f++) { T relProb = y[f * yEWS]; cumProb += relProb; if (prob <= cumProb || f == yLength - 1) { z[e * zEWS] = x[f * xEWS]; f += yLength; } } } } else { Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; Nd4jLong zCoord[MAX_RANK]; int xRank = shape::rank(xShapeBuffer); int yRank = shape::rank(yShapeBuffer); int zRank = shape::rank(zShapeBuffer); auto xShape = shape::shapeOf(xShapeBuffer); auto yShape = shape::shapeOf(yShapeBuffer); auto zShape = shape::shapeOf(zShapeBuffer); auto xStride = shape::stride(xShapeBuffer); auto yStride = shape::stride(yShapeBuffer); auto zStride = shape::stride(zShapeBuffer); #pragma omp parallel for num_threads(_threads) if (_threads > 1) schedule(guided) for (Nd4jLong i = 0; i < zLength; i++) { shape::ind2sub(zRank, zShape, i, zCoord); auto zOffset2 = shape::getOffset(0, zShape, zStride, zCoord, zRank); T prob = buffer->relativeT<T>(i); T cumProb = (T) 0.0f; for (Nd4jLong f = 0; f < yLength; f++) { shape::ind2sub(yRank, yShape, i, yCoord); auto yOffset2 = shape::getOffset(0, yShape, yStride, yCoord, yRank); T relProb = y[yOffset2]; cumProb += relProb; if (prob <= cumProb || f == yLength - 1) { shape::ind2sub(xRank, xShape, f, xCoord); Nd4jLong xOffset2 = shape::getOffset(0, xShape, xStride, xCoord, 
xRank); z[zOffset2] = x[xOffset2]; f += yLength; } } } } // update rng state buffer->rewindH(zLength); } }; ////////////////////////////////////////////////////////////////////// /** * This Op produces random values within specified boundaries. Distribuion is Gaussian */ template<typename T> class GaussianDistribution { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { __shared__ T epsilon; __shared__ T two_pi; __shared__ Nd4jLong zLength; __shared__ Nd4jLong zEWS; __shared__ Nd4jLong yEWS; __shared__ T mean; __shared__ T stddev; __shared__ int step; __shared__ T *tZ; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(shmem); cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); tZ = reinterpret_cast<T *>(shmem + sizeof(nd4j::random::RandomBuffer)); zLength = shape::length(zShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); epsilon = static_cast<T>(1e-5); two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846); mean = extraArguments[0]; stddev = extraArguments[1]; step = (blockDim.x * gridDim.x); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; int middle = zLength % 2 == 0 ? 
zLength / 2 : zLength / 2 + 1; for (int e = tid; e < middle; e += step) { auto epm = e + middle; // we need to get random values T r0 = buffer->relativeT<T>(e, epsilon, static_cast<T>(1.0f)); T r1 = buffer->relativeT<T>(epm, epsilon, static_cast<T>(1.0f)); T realMean0 = y == z ? mean : y[e * yEWS]; z[e * zEWS] = (nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(r0)) * nd4j::math::nd4j_cos<T>(two_pi * r1)) * stddev + realMean0; if (epm < zLength) { T realMean1 = y == z ? mean : y[epm * yEWS]; z[epm * zEWS] = (nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(r0)) * nd4j::math::nd4j_sin<T>(two_pi * r1)) * stddev + realMean1; } } __syncthreads(); devBuffer->rewind(zLength); } #endif static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { const T two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846); auto zLength = shape::length(zShapeBuffer); auto yEWS = shape::elementWiseStride(yShapeBuffer); auto zEWS = shape::elementWiseStride(zShapeBuffer); auto middle = zLength % 2 == 0 ? 
zLength / 2 : zLength / 2 + 1; int elementsPerThread = middle / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); int span = (middle / _threads) + 8; // we're enforcing even chunks, since it's mandatory for this algorithm span -= span % 2; nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); const T mean = extraArguments[0]; const T stddev = extraArguments[1]; const T epsilon = static_cast<T>(1e-5); #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); Nd4jLong start = span * tid; Nd4jLong end = span * (tid + 1); if (end > middle) end = middle; for (Nd4jLong e = start; e < end; e++) { auto epm = e + middle; // we need to get random values T r0 = buffer->relativeT<T>(e, epsilon, static_cast<T>(1.0f)); T r1 = buffer->relativeT<T>(epm, epsilon, static_cast<T>(1.0f)); T realMean0 = y == z ? mean : y[e * yEWS]; z[e * zEWS] = (nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(r0)) * nd4j::math::nd4j_cos<T>(two_pi * r1)) * stddev + realMean0; if (epm < zLength) { T realMean1 = y == z ? 
mean : y[epm * yEWS]; z[epm * zEWS] = (nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(r0)) * nd4j::math::nd4j_sin<T>(two_pi * r1)) * stddev + realMean1; } } } // update rng state buffer->rewindH(zLength); } }; ////////////////////////////////////////////////////////////////////// /** * This Op produces random values within [0..N], Distribuion is binomial */ template<typename T> class BinomialDistribution { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { int trials = (int) extraArguments[0]; T prob = extraArguments[1]; __shared__ Nd4jLong zLength; __shared__ int yEWS; __shared__ int zEWS; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(shmem); cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *>(state); dB = reinterpret_cast<unsigned char *> (state); zLength = shape::length(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong e = tid; e < zLength; e += blockDim.x * gridDim.x) { int success = 0; for (int t = 1; t <= trials; t++) { T randVal = buffer->relativeT<T>((e+1) * t); if (y != z) { // we're using external probs prob = y[(t-1) * yEWS]; } if (randVal < prob) success++; } // if trials is set to 0, effectively we just have successful memset z[e * zEWS] = static_cast<T>(success); } 
__syncthreads(); if (trials > 0) devBuffer->rewind(zLength * trials); } #endif static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { int trials = (int) extraArguments[0]; Nd4jLong zLength = shape::length(zShapeBuffer); auto yEWS = shape::elementWiseStride(yShapeBuffer); auto zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); int span = (zLength / _threads) + 8; nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); Nd4jLong start = span * tid; Nd4jLong end = span * (tid + 1); if (end > zLength) end = zLength; T prob = extraArguments[1]; for (Nd4jLong e = start; e < end; e++) { int success = 0; for (int t = 1; t <= trials; t++) { T randVal = buffer->relativeT<T>((e+1) * t); if (y != z) { // we're using external probs prob = y[(t-1) * yEWS]; } if (randVal < prob) success++; } // if trials is set to 0, effectively we just have successful memset z[e * zEWS] = static_cast<T>(success); } } // update rng state if (trials > 0) buffer->rewindH(zLength * trials); } }; ////////////////////////////////////////////////////////////////////// /** * This Op produces random values within [0..N], Distribuion is binomial */ template<typename T> class BinomialDistributionEx { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { int trials = (int) extraArguments[0]; T prob = extraArguments[1]; __shared__ Nd4jLong zLength; __shared__ int yEWS; 
__shared__ int zEWS; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::random::RandomBuffer *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); zLength = shape::length(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong e = tid; e < zLength; e += blockDim.x * gridDim.x) { int success = 0; for (int t = 1; t <= trials; t++) { T randVal = buffer->relativeT<T>((e+1) * t); if (y != z) { // we're using external probs prob = y[e * yEWS]; } if (randVal < prob) success++; } // if trials is set to 0, effectively we just have successful memset z[e * zEWS] = (T) success; } __syncthreads(); if (trials > 0) devBuffer->rewind(zLength * trials); } #endif static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { int trials = (int) extraArguments[0]; Nd4jLong zLength = shape::length(zShapeBuffer); auto yEWS = shape::elementWiseStride(yShapeBuffer); auto zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); auto span = (zLength / _threads) + 8; nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); 
Nd4jLong start = span * tid; Nd4jLong end = span * (tid + 1); if (end > zLength) end = zLength; T prob = extraArguments[1]; for (Nd4jLong e = start; e < end; e++) { int success = 0; for (int t = 1; t <= trials; t++) { T randVal = buffer->relativeT<T>((e+1) * t); if (y != z) { // we're using external probs prob = y[e * yEWS]; } if (randVal < prob) success++; } // if trials is set to 0, effectively we just have successful memset z[e * zEWS] = static_cast<T>(success); } } // update rng state if (trials > 0) buffer->rewindH(zLength * trials); } }; ////////////////////////////////////////////////////////////////////// // This Op produces random Gaussian values within [mean-2*stddev,mean+2*stddev] template<typename T> class TruncatedNormalDistribution { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { __shared__ T epsilon; __shared__ T two_pi; __shared__ Nd4jLong zLength; __shared__ Nd4jLong zEWS; __shared__ Nd4jLong yEWS; __shared__ T mean; __shared__ T stddev; __shared__ int step; __shared__ T *tZ; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(shmem); cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); tZ = reinterpret_cast<T *>(shmem + sizeof(nd4j::random::RandomBuffer)); zLength = shape::length(zShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); epsilon = static_cast<T>(1e-6f); two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846); mean = extraArguments[0]; stddev = 
extraArguments[1];
            step = (blockDim.x * gridDim.x);
        }
        __syncthreads();

        // using this loop instead of memcpy: cooperative copy of the RNG state into shared memory
        for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
            cB[e] = dB[e];
        }
        __syncthreads();

        int tid = blockIdx.x * blockDim.x + threadIdx.x;
        // Box-Muller yields samples in pairs, so each iteration fills element e and e+middle
        int middle = zLength % 2 == 0 ? zLength / 2 : zLength / 2 + 1;

        T result0, result1, u0, u1, z0, z1, uT, uP;
        // acceptance band: |sample - mean| must stay within 2*|stddev|
        T ds = nd4j::math::nd4j_abs<T>(stddev) * static_cast<T>(2.0f);

        for (Nd4jLong e = tid; e < middle; e += step) {
            // we need to get random values
            Nd4jLong generation0 = 0;
            auto epm = e + middle;
            T realMean0 = y == z ? mean : y[e * yEWS];
            // NOTE(review): when zLength is odd, epm can reach zLength here — looks like an
            // out-of-range read of y when y != z; verify against upstream.
            T realMean1 = y == z ? mean : y[epm * yEWS];
            T aRealMean0 = nd4j::math::nd4j_abs<T>(realMean0);
            T aRealMean1 = nd4j::math::nd4j_abs<T>(realMean1);

            do {
                u0 = buffer->relativeT<T>(e + generation0, epsilon, static_cast<T>(1.0f));
                u1 = buffer->relativeT<T>(epm + generation0, epsilon, static_cast<T>(1.0f));
                uT = nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(u0));
                uP = two_pi * u1;
                z0 = uT * nd4j::math::nd4j_cos<T>(uP);
                z1 = uT * nd4j::math::nd4j_sin<T>(uP);

                result0 = z0 * stddev + realMean0;
                result1 = z1 * stddev + realMean1;
                // shift the RNG index so the retry draws fresh uniforms
                generation0 += zLength;
            } while (ds < aRealMean0 + nd4j::math::nd4j_abs<T>(result0) || aRealMean1 + nd4j::math::nd4j_abs<T>(result1) > ds);

            z[e * zEWS] = result0;
            if((epm) < zLength)
                z[epm * zEWS] = result1;
        }
        __syncthreads();

        devBuffer->rewind(zLength);
    }
#endif

    // CPU implementation: truncated Gaussian via Box-Muller pairs with rejection.
    // extraArguments[0] = mean, extraArguments[1] = stddev; per-element means from y when y != z.
    static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
        const T two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846);

        Nd4jLong zLength = shape::length(zShapeBuffer);
        auto yEWS = shape::elementWiseStride(yShapeBuffer);
        auto zEWS = shape::elementWiseStride(zShapeBuffer);

        // samples are produced in pairs (e, e+middle)
        auto middle = zLength % 2 == 0 ? zLength / 2 : zLength / 2 + 1;

        int elementsPerThread = middle / TAD_THRESHOLD;
        int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
        _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());

        int span = (middle / _threads) + 8;

        // we're enforcing even chunks, since it's mandatory for this algorithm
        span -= span % 2;

        nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);

        T mean = extraArguments[0];
        T stddev = extraArguments[1];

#pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread)
        {
            int tid = omp_get_thread_num();
            Nd4jLong start = span * tid;
            Nd4jLong end = span * (tid + 1);
            if (end > middle) {
                end = middle;
            }

            T z0, z1;
            T u0, u1;
            T result0, result1, lnu0, lnu1;

            // acceptance band: 2*|stddev| around the (per-element) mean
            T ds = nd4j::math::nd4j_abs<T>(stddev) * (T) 2.0f;
            for (Nd4jLong e = start; e < end; e++) {
                /*
                 * Since box-muller transform expects non-zero u0 value, we'll just use rng with boundaries
                 */
                Nd4jLong generation0 = 0;
                auto epm = e + middle;
                T realMean0 = y == z ? mean : y[e * yEWS];
                T realMean1 = y == z ?
shape::length(zShapeBuffer);
            zEWS = shape::elementWiseStride(zShapeBuffer);
            yEWS = shape::elementWiseStride(yShapeBuffer);
            epsilon = static_cast<T>(1e-5);
            two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846);
            mean = extraArguments[0];
            stddev = extraArguments[1];
            step = (blockDim.x * gridDim.x);
        }
        __syncthreads();

        // using this loop instead of memcpy: cooperative copy of the RNG state into shared memory
        for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
            cB[e] = dB[e];
        }
        __syncthreads();

        int tid = blockIdx.x * blockDim.x + threadIdx.x;
        // Box-Muller produces two values per uniform pair: elements e and e+middle
        int middle = zLength % 2 == 0 ? zLength / 2 : zLength / 2 + 1;

        for (Nd4jLong e = tid; e < middle; e += step) {
            auto epm = e + middle;

            // we need to get random values
            T r0 = buffer->relativeT<T>(e, epsilon, static_cast<T>(1.0f));
            T r1 = buffer->relativeT<T>(epm, epsilon, static_cast<T>(1.0f));

            // log-normal sample = exp(gaussian * stddev + mean)
            T realMean = y == z ? mean : y[e * yEWS];
            z[e *zEWS] = nd4j::math::nd4j_exp<T>((nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(r0)) * nd4j::math::nd4j_cos<T>(two_pi * r1)) * stddev + realMean);

            if (epm < zLength) {
                realMean = y == z ? mean : y[epm * yEWS];
                z[epm *zEWS] = nd4j::math::nd4j_exp<T>((nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(r0)) * nd4j::math::nd4j_sin<T>(two_pi * r1)) * stddev + realMean);
            }
        }
        __syncthreads();

        devBuffer->rewind(zLength);
    }
#endif

    // CPU implementation: log-normal samples via Box-Muller pairs.
    // extraArguments[0] = mean, extraArguments[1] = stddev of the underlying Gaussian;
    // per-element means are read from y when y != z.
    static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
        const T two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846);

        Nd4jLong zLength = shape::length(zShapeBuffer);
        auto yEWS = shape::elementWiseStride(yShapeBuffer);
        auto zEWS = shape::elementWiseStride(zShapeBuffer);

        auto middle = zLength % 2 == 0 ? zLength / 2 : zLength / 2 + 1;

        int elementsPerThread = middle / TAD_THRESHOLD;
        int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
        _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());

        // NOTE(review): span is sized from zLength but the loop bound is middle;
        // high-tid threads simply get empty ranges, so this is wasteful but not incorrect.
        int span = (zLength / _threads) + 8;

        // we're enforcing even chunks, since it's mandatory for this algorithm
        span -= span % 2;

        auto buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);

        const T mean = extraArguments[0];
        const T stddev = extraArguments[1];

        // lower bound keeps log(u) finite (Box-Muller needs u > 0)
        const T epsilon = static_cast<T>(1e-5);

#pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread)
        {
            int tid = omp_get_thread_num();
            Nd4jLong start = span * tid;
            Nd4jLong end = span * (tid + 1);
            if (end > middle)
                end = middle;

            for (Nd4jLong e = start; e < end; e++) {
                auto epm = e + middle;

                // we need to get random values
                T r0 = buffer->relativeT<T>(e, epsilon, static_cast<T>(1.0f));
                T r1 = buffer->relativeT<T>(epm, epsilon, static_cast<T>(1.0f));

                T realMean = y == z ? mean : y[e * yEWS];
                z[e * zEWS] = nd4j::math::nd4j_exp<T>((nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(r0)) * nd4j::math::nd4j_cos<T>(two_pi * r1)) * stddev + realMean);

                if (epm < zLength) {
                    realMean = y == z ? mean : y[epm * yEWS];
                    z[epm * zEWS] = nd4j::math::nd4j_exp<T>((nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(r0)) * nd4j::math::nd4j_sin<T>(two_pi * r1)) * stddev + realMean);
                }
            }
        }

        // update rng state
        buffer->rewindH(zLength);
    }
};
}

#endif //LIBND4J_SPECIAL_RANDOM_OPS_H
// ===== file: convolution_1x1_packnto1_fp16s.h =====
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 1x1 stride-1 convolution, packn-to-1 fp16 layout: a 1x1 conv over the whole
// feature map is exactly a GEMM, so flatten HxW into one row and delegate.
static void conv1x1s1_sgemm_packnto1_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    const int size = w * h;

    // reinterpret the input as a single row of `size` columns (no data copy)
    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = size;
    bottom_im2col.h = 1;

    im2col_sgemm_packnto1_fp16sa_rvv(bottom_im2col, top_blob, kernel, _bias, opt);
}

// 1x1 stride-2 convolution: first gather every second pixel into a shrunken
// blob, then run the stride-1 sgemm path on it.
static void conv1x1s2_packnto1_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // packn = vector register width in fp16 lanes
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // per-row remainder after reading outw strided pixels, plus one skipped row (stride 2)
    const int tailstep = (w - 2 * outw + w) * packn;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const __fp16* r0 = bottom_blob.channel(p);
        __fp16* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // copy one packed pixel, then hop two pixels in the source
                vfloat16m1_t _val = vle16_v_f16m1(r0, vl);
                vse16_v_f16m1(outptr, _val, vl);

                r0 += packn * 2;
                outptr += packn;
            }

            r0 += tailstep;
        }
    }

    conv1x1s1_sgemm_packnto1_fp16sa_rvv(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
// ===== file: convolution_sgemm.h =====
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __AVX__
// Repack the convolution kernel for the AVX sgemm: output channels are grouped
// in blocks of 8, then 4, then singles, with each group's weights interleaved
// so the inner GEMM loop can broadcast them sequentially.
static void conv_im2col_sgemm_transform_kernel_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size)
{
    const float* kernel = _kernel;

    // kernel memory packed 8 x 8
    kernel_tm.create(8*kernel_size, inch, outch/8 + (outch%8)/4 + outch%4);

    int nn_outch = 0;
    int remain_outch_start = 0;

    // --- blocks of 8 output channels ---
    nn_outch = outch >> 3;
    remain_outch_start = nn_outch << 3;

    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 8;

        const float* k0 = kernel + (p+0)*inch*kernel_size;
        const float* k1 = kernel + (p+1)*inch*kernel_size;
        const float* k2 = kernel + (p+2)*inch*kernel_size;
        const float* k3 = kernel + (p+3)*inch*kernel_size;
        const float* k4 = kernel + (p+4)*inch*kernel_size;
        const float* k5 = kernel + (p+5)*inch*kernel_size;
        const float* k6 = kernel + (p+6)*inch*kernel_size;
        const float* k7 = kernel + (p+7)*inch*kernel_size;

        float* ktmp = kernel_tm.channel(p/8);

        for (int q=0; q<inch*kernel_size; q++)
        {
            // interleave one weight from each of the 8 output channels
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp[4] = k4[0];
            ktmp[5] = k5[0];
            ktmp[6] = k6[0];
            ktmp[7] = k7[0];
            ktmp += 8;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
            k4 += 1;
            k5 += 1;
            k6 += 1;
            k7 += 1;
        }
    }

    // --- blocks of 4 output channels ---
    nn_outch = (outch - remain_outch_start) >> 2;

    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;

        const float* k0 = kernel + (p+0)*inch*kernel_size;
        const float* k1 = kernel + (p+1)*inch*kernel_size;
        const float* k2 = kernel + (p+2)*inch*kernel_size;
        const float* k3 = kernel + (p+3)*inch*kernel_size;

        float* ktmp = kernel_tm.channel(p/8 + (p%8)/4);

        for (int q=0; q<inch*kernel_size; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }

    // --- remaining single output channels ---
    remain_outch_start += nn_outch << 2;

    for (int p=remain_outch_start; p<outch; p++)
    {
        const float* k0 = kernel + (p+0)*inch*kernel_size;

        float* ktmp = kernel_tm.channel(p/8 + (p%8)/4 + p%4);

        for (int q=0; q<inch*kernel_size; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}

// im2col + packed sgemm convolution (AVX path). Unrolls the output image into
// columns, repacks them in groups of 8, then multiplies against the repacked kernel.
static void conv_im2col_sgemm_sse(const Mat &bottom_blob, Mat &top_blob, const Mat & kernel_tm, const Mat& _bias, \
    const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* bias = _bias;

    // im2col
    Mat bottom_im2col(outw*outh, kernel_h*kernel_w*inch, elemsize, opt.workspace_allocator);
    {
        const int stride = kernel_h*kernel_w*outw*outh;
        float* ret = (float*)bottom_im2col;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=0; p<inch; p++)
        {
            const float* input = bottom_blob.channel(p);
            int retID = stride * p;
            for (int u=0; u<kernel_h; u++)
            {
                for (int v=0; v<kernel_w; v++)
                {
                    for (int i=0; i<outh; i++)
                    {
                        for (int j=0; j<outw; j++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;
                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;

    // bottom_im2col memory packed 8 x 8
    Mat bottom_tm(8*kernel_size, inch, out_size/8 + out_size%8, elemsize, opt.workspace_allocator);
    {
        int nn_size = out_size >> 3;
        int remain_size_start = nn_size << 3;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 8;
            const float* img0 = bottom_im2col.channel(0);
            img0 += i;

            float* tmpptr = bottom_tm.channel(i/8);

            // gather 8 consecutive output columns per im2col row
            for (int q=0; q<inch*kernel_size; q++)
            {
#if __AVX__
                _mm256_storeu_ps(tmpptr, _mm256_loadu_ps(img0));
#else
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img0[2];
                tmpptr[3] = img0[3];
                tmpptr[4] = img0[4];
                tmpptr[5] = img0[5];
                tmpptr[6] = img0[6];
                tmpptr[7] = img0[7];
#endif // __SSE__
                tmpptr += 8;
                img0 += out_size;
            }
        }

        // leftover columns, packed one at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_size_start; i<out_size; i++)
        {
            const float* img0 = bottom_im2col.channel(0);
            img0 += i;

            float* tmpptr = bottom_tm.channel(i/8 + i%8);

            for (int q=0; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += out_size;
            }
        }
    }

    // sgemm(int M, int N, int L, float* A, float* B, float* C)
    {
        //int M = outch;                    // outch
        int N = outw * outh;                // outsize or out stride
        int L = kernel_w * kernel_h * inch; // ksize * inch

        int nn_outch = 0;
        int remain_outch_start = 0;

        // --- 8 output channels at a time ---
        nn_outch = outch >> 3;
        remain_outch_start = nn_outch << 3;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int i = pp * 8;

            float* output0 = top_blob.channel(i);
            float* output1 = top_blob.channel(i+1);
            float* output2 = top_blob.channel(i+2);
            float* output3 = top_blob.channel(i+3);
            float* output4 = top_blob.channel(i+4);
            float* output5 = top_blob.channel(i+5);
            float* output6 = top_blob.channel(i+6);
            float* output7 = top_blob.channel(i+7);

            const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
            const float* biasptr = bias ? bias + i : zeros;

            int j=0;
            // 8 output columns per iteration
            for (; j+7<N; j=j+8)
            {
                const float* vb = bottom_tm.channel(j/8);
                const float* va = kernel_tm.channel(i/8);

#if __AVX__
                // accumulators start at the per-channel bias
                __m256 _sum0 = _mm256_broadcast_ss(biasptr);
                __m256 _sum1 = _mm256_broadcast_ss(biasptr+1);
                __m256 _sum2 = _mm256_broadcast_ss(biasptr+2);
                __m256 _sum3 = _mm256_broadcast_ss(biasptr+3);
                __m256 _sum4 = _mm256_broadcast_ss(biasptr+4);
                __m256 _sum5 = _mm256_broadcast_ss(biasptr+5);
                __m256 _sum6 = _mm256_broadcast_ss(biasptr+6);
                __m256 _sum7 = _mm256_broadcast_ss(biasptr+7);

                int k=0;
                // reduction dimension unrolled by 4
                for (; k+3<L; k=k+4)
                {
                    // k0
                    __m256 _va0 = _mm256_broadcast_ss(va);
                    __m256 _va1 = _mm256_broadcast_ss(va+1);
                    __m256 _va2 = _mm256_broadcast_ss(va+2);
                    __m256 _va3 = _mm256_broadcast_ss(va+3);
                    __m256 _vb0 = _mm256_loadu_ps(vb);
                    __m256 _vb1 = _mm256_loadu_ps(vb+8);
                    __m256 _vb2 = _mm256_loadu_ps(vb+16);
                    __m256 _vb3 = _mm256_loadu_ps(vb+24);

                    _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
                    _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
                    _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
                    _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30

                    _va0 = _mm256_broadcast_ss(va+4);
                    _va1 = _mm256_broadcast_ss(va+5);
                    _va2 = _mm256_broadcast_ss(va+6);
                    _va3 = _mm256_broadcast_ss(va+7);

                    _sum4 = _mm256_fmadd_ps(_vb0, _va0, _sum4); // sum4 = (a00-a07) * k40
                    _sum5 = _mm256_fmadd_ps(_vb0, _va1, _sum5); // sum5 = (a00-a07) * k50
                    _sum6 = _mm256_fmadd_ps(_vb0, _va2, _sum6); // sum6 = (a00-a07) * k60
                    _sum7 = _mm256_fmadd_ps(_vb0, _va3, _sum7); // sum7 = (a00-a07) * k70

                    va += 8;

                    // k1
                    _va0 = _mm256_broadcast_ss(va);
                    _va1 = _mm256_broadcast_ss(va+1);
                    _va2 = _mm256_broadcast_ss(va+2);
                    _va3 = _mm256_broadcast_ss(va+3);

                    _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01
                    _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11
                    _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21
                    _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31

                    _va0 = _mm256_broadcast_ss(va+4);
                    _va1 = _mm256_broadcast_ss(va+5);
                    _va2 = _mm256_broadcast_ss(va+6);
                    _va3 = _mm256_broadcast_ss(va+7);

                    _sum4 = _mm256_fmadd_ps(_vb1, _va0, _sum4); // sum4 += (a10-a17) * k41
                    _sum5 = _mm256_fmadd_ps(_vb1, _va1, _sum5); // sum5 += (a10-a17) * k51
                    _sum6 = _mm256_fmadd_ps(_vb1, _va2, _sum6); // sum6 += (a10-a17) * k61
                    _sum7 = _mm256_fmadd_ps(_vb1, _va3, _sum7); // sum7 += (a10-a17) * k71

                    va += 8;

                    // k2
                    _va0 = _mm256_broadcast_ss(va);
                    _va1 = _mm256_broadcast_ss(va+1);
                    _va2 = _mm256_broadcast_ss(va+2);
                    _va3 = _mm256_broadcast_ss(va+3);

                    _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02
                    _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12
                    _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22
                    _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32

                    _va0 = _mm256_broadcast_ss(va+4);
                    _va1 = _mm256_broadcast_ss(va+5);
                    _va2 = _mm256_broadcast_ss(va+6);
                    _va3 = _mm256_broadcast_ss(va+7);

                    _sum4 = _mm256_fmadd_ps(_vb2, _va0, _sum4); // sum4 += (a20-a27) * k42
                    _sum5 = _mm256_fmadd_ps(_vb2, _va1, _sum5); // sum5 += (a20-a27) * k52
                    _sum6 = _mm256_fmadd_ps(_vb2, _va2, _sum6); // sum6 += (a20-a27) * k62
                    _sum7 = _mm256_fmadd_ps(_vb2, _va3, _sum7); // sum7 += (a20-a27) * k72

                    va += 8;

                    // k3
                    _va0 = _mm256_broadcast_ss(va);
                    _va1 = _mm256_broadcast_ss(va+1);
                    _va2 = _mm256_broadcast_ss(va+2);
                    _va3 = _mm256_broadcast_ss(va+3);

                    _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03
                    _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13
                    _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23
                    _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33

                    _va0 = _mm256_broadcast_ss(va+4);
                    _va1 = _mm256_broadcast_ss(va+5);
                    _va2 = _mm256_broadcast_ss(va+6);
                    _va3 = _mm256_broadcast_ss(va+7);

                    _sum4 = _mm256_fmadd_ps(_vb3, _va0, _sum4); // sum4 += (a30-a37) * k43
                    _sum5 = _mm256_fmadd_ps(_vb3, _va1, _sum5); // sum5 += (a30-a37) * k53
                    _sum6 =
_mm256_fmadd_ps(_vb3, _va2, _sum6); // sum6 += (a30-a37) * k63
                    _sum7 = _mm256_fmadd_ps(_vb3, _va3, _sum7); // sum7 += (a30-a37) * k73

                    va += 8;
                    vb += 32;
                }

                // leftover reduction steps (L not a multiple of 4)
                for (; k<L; k++)
                {
                    // k0
                    __m256 _va0 = _mm256_broadcast_ss(va);
                    __m256 _va1 = _mm256_broadcast_ss(va+1);
                    __m256 _va2 = _mm256_broadcast_ss(va+2);
                    __m256 _va3 = _mm256_broadcast_ss(va+3);
                    __m256 _va4 = _mm256_broadcast_ss(va+4);
                    __m256 _va5 = _mm256_broadcast_ss(va+5);
                    __m256 _va6 = _mm256_broadcast_ss(va+6);
                    __m256 _va7 = _mm256_broadcast_ss(va+7);
                    __m256 _vb0 = _mm256_loadu_ps(vb);

                    _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
                    _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
                    _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
                    _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
                    _sum4 = _mm256_fmadd_ps(_vb0, _va4, _sum4); // sum4 = (a00-a07) * k40
                    _sum5 = _mm256_fmadd_ps(_vb0, _va5, _sum5); // sum5 = (a00-a07) * k50
                    _sum6 = _mm256_fmadd_ps(_vb0, _va6, _sum6); // sum6 = (a00-a07) * k60
                    _sum7 = _mm256_fmadd_ps(_vb0, _va7, _sum7); // sum7 = (a00-a07) * k70

                    va += 8;
                    vb += 8;
                }

                _mm256_storeu_ps(output0, _sum0);
                _mm256_storeu_ps(output1, _sum1);
                _mm256_storeu_ps(output2, _sum2);
                _mm256_storeu_ps(output3, _sum3);
                _mm256_storeu_ps(output4, _sum4);
                _mm256_storeu_ps(output5, _sum5);
                _mm256_storeu_ps(output6, _sum6);
                _mm256_storeu_ps(output7, _sum7);
#else
                // scalar fallback: same 8x8 tile, accumulated in plain arrays
                float sum0[8] = {0};
                float sum1[8] = {0};
                float sum2[8] = {0};
                float sum3[8] = {0};
                float sum4[8] = {0};
                float sum5[8] = {0};
                float sum6[8] = {0};
                float sum7[8] = {0};

                int k=0;
                for (; k+7<L; k=k+8)
                {
                    for (int n=0; n<8; n++)
                    {
                        sum0[n] += va[0] * vb[n];
                        sum1[n] += va[1] * vb[n];
                        sum2[n] += va[2] * vb[n];
                        sum3[n] += va[3] * vb[n];
                        sum4[n] += va[4] * vb[n];
                        sum5[n] += va[5] * vb[n];
                        sum6[n] += va[6] * vb[n];
                        sum7[n] += va[7] * vb[n];
                        va += 8;

                        sum0[n] += va[0] * vb[n+8];
                        sum1[n] += va[1] * vb[n+8];
                        sum2[n] += va[2] * vb[n+8];
                        sum3[n] += va[3] * vb[n+8];
                        sum4[n] += va[4] * vb[n+8];
                        sum5[n] += va[5] * vb[n+8];
                        sum6[n] += va[6] * vb[n+8];
                        sum7[n] += va[7] * vb[n+8];
                        va += 8;

                        sum0[n] += va[0] * vb[n+16];
                        sum1[n] += va[1] * vb[n+16];
                        sum2[n] += va[2] * vb[n+16];
                        sum3[n] += va[3] * vb[n+16];
                        sum4[n] += va[4] * vb[n+16];
                        sum5[n] += va[5] * vb[n+16];
                        sum6[n] += va[6] * vb[n+16];
                        sum7[n] += va[7] * vb[n+16];
                        va += 8;

                        sum0[n] += va[0] * vb[n+24];
                        sum1[n] += va[1] * vb[n+24];
                        sum2[n] += va[2] * vb[n+24];
                        sum3[n] += va[3] * vb[n+24];
                        sum4[n] += va[4] * vb[n+24];
                        sum5[n] += va[5] * vb[n+24];
                        sum6[n] += va[6] * vb[n+24];
                        sum7[n] += va[7] * vb[n+24];
                        va += 8;

                        sum0[n] += va[0] * vb[n+32];
                        sum1[n] += va[1] * vb[n+32];
                        sum2[n] += va[2] * vb[n+32];
                        sum3[n] += va[3] * vb[n+32];
                        sum4[n] += va[4] * vb[n+32];
                        sum5[n] += va[5] * vb[n+32];
                        sum6[n] += va[6] * vb[n+32];
                        sum7[n] += va[7] * vb[n+32];
                        va += 8;

                        sum0[n] += va[0] * vb[n+40];
                        sum1[n] += va[1] * vb[n+40];
                        sum2[n] += va[2] * vb[n+40];
                        sum3[n] += va[3] * vb[n+40];
                        sum4[n] += va[4] * vb[n+40];
                        sum5[n] += va[5] * vb[n+40];
                        sum6[n] += va[6] * vb[n+40];
                        sum7[n] += va[7] * vb[n+40];
                        va += 8;

                        sum0[n] += va[0] * vb[n+48];
                        sum1[n] += va[1] * vb[n+48];
                        sum2[n] += va[2] * vb[n+48];
                        sum3[n] += va[3] * vb[n+48];
                        sum4[n] += va[4] * vb[n+48];
                        sum5[n] += va[5] * vb[n+48];
                        sum6[n] += va[6] * vb[n+48];
                        sum7[n] += va[7] * vb[n+48];
                        va += 8;

                        sum0[n] += va[0] * vb[n+56];
                        sum1[n] += va[1] * vb[n+56];
                        sum2[n] += va[2] * vb[n+56];
                        sum3[n] += va[3] * vb[n+56];
                        sum4[n] += va[4] * vb[n+56];
                        sum5[n] += va[5] * vb[n+56];
                        sum6[n] += va[6] * vb[n+56];
                        sum7[n] += va[7] * vb[n+56];
                        // rewind va so the next n uses the same 8 reduction steps
                        va -= 56;
                    }

                    va += 64;
                    vb += 64;
                }

                for (; k<L; k++)
                {
                    for (int n=0; n<8; n++)
                    {
                        sum0[n] += va[0] * vb[n];
                        sum1[n] += va[1] * vb[n];
                        sum2[n] += va[2] * vb[n];
                        sum3[n] += va[3] * vb[n];
                        sum4[n] += va[4] * vb[n];
                        sum5[n] += va[5] * vb[n];
                        sum6[n] += va[6] * vb[n];
                        sum7[n] += va[7] * vb[n];
                    }

                    va += 8;
                    vb += 8;
                }

                for (int n=0; n<8; n++)
                {
                    output0[n] = sum0[n] + biasptr[0];
                    output1[n] = sum1[n] + biasptr[1];
                    output2[n] = sum2[n] + biasptr[2];
                    output3[n] = sum3[n] + biasptr[3];
                    output4[n] = sum4[n] + biasptr[4];
                    output5[n] = sum5[n] + biasptr[5];
                    output6[n] = sum6[n] + biasptr[6];
                    output7[n] = sum7[n] + biasptr[7];
                }
#endif // __AVX__
                output0 += 8;
                output1 += 8;
                output2 += 8;
                output3 += 8;
                output4 += 8;
                output5 += 8;
                output6 += 8;
                output7 += 8;
            }

            // remaining single output columns for this 8-channel group
            for (; j<N; j++)
            {
                const float* vb = bottom_tm.channel(j/8 + j%8);
                const float* va = kernel_tm.channel(i/8);

#if __AVX__
                // one __m256 holds the 8 per-channel results for this column
                __m256 _sum0_7 = _mm256_loadu_ps(biasptr);
                __m256 _sum0 = _mm256_set1_ps(0.0);
                __m256 _sum1 = _mm256_set1_ps(0.0);
                __m256 _sum2 = _mm256_set1_ps(0.0);
                __m256 _sum3 = _mm256_set1_ps(0.0);

                int k=0;
                for (; k+3<L; k=k+4)
                {
                    __m256 _vb0 = _mm256_broadcast_ss(vb);
                    __m256 _vb1 = _mm256_broadcast_ss(vb+1);
                    __m256 _vb2 = _mm256_broadcast_ss(vb+2);
                    __m256 _vb3 = _mm256_broadcast_ss(vb+3);
                    __m256 _va0 = _mm256_loadu_ps(va);
                    __m256 _va1 = _mm256_loadu_ps(va+8);
                    __m256 _va2 = _mm256_loadu_ps(va+16);
                    __m256 _va3 = _mm256_loadu_ps(va+24);

                    _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k70) * a00
                    _sum1 = _mm256_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k71) * a10
                    _sum2 = _mm256_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k72) * a20
                    _sum3 = _mm256_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k73) * a30

                    va += 32;
                    vb += 4;
                }

                _sum0 = _mm256_add_ps(_sum0, _sum1);
                _sum2 = _mm256_add_ps(_sum2, _sum3);
                _sum0_7 = _mm256_add_ps(_sum0_7, _sum0);
                _sum0_7 = _mm256_add_ps(_sum0_7, _sum2);

                for (; k<L; k++)
                {
                    __m256 _vb0 = _mm256_broadcast_ss(vb);
                    __m256 _va = _mm256_loadu_ps(va);

                    _sum0_7 = _mm256_fmadd_ps(_va, _vb0, _sum0_7); // sum0 += (k00-k70) * a00

                    va += 8;
                    vb += 1;
                }

                float output_sum0_7[8] = {0.f};
                _mm256_storeu_ps(output_sum0_7, _sum0_7);
                output0[0] = output_sum0_7[0];
                output1[0] = output_sum0_7[1];
                output2[0] = output_sum0_7[2];
                output3[0] = output_sum0_7[3];
                output4[0] = output_sum0_7[4];
                output5[0] = output_sum0_7[5];
                output6[0] = output_sum0_7[6];
                output7[0] = output_sum0_7[7];
#else
                float sum0 = biasptr[0];
                float sum1 = biasptr[1];
                float sum2 = biasptr[2];
                float sum3 = biasptr[3];
                float sum4 =
biasptr[4];
                float sum5 = biasptr[5];
                float sum6 = biasptr[6];
                float sum7 = biasptr[7];

                for (int k=0; k<L; k++)
                {
                    sum0 += va[0] * vb[0];
                    sum1 += va[1] * vb[0];
                    sum2 += va[2] * vb[0];
                    sum3 += va[3] * vb[0];
                    sum4 += va[4] * vb[0];
                    sum5 += va[5] * vb[0];
                    sum6 += va[6] * vb[0];
                    sum7 += va[7] * vb[0];

                    va += 8;
                    vb += 1;
                }

                output0[0] = sum0;
                output1[0] = sum1;
                output2[0] = sum2;
                output3[0] = sum3;
                output4[0] = sum4;
                output5[0] = sum5;
                output6[0] = sum6;
                output7[0] = sum7;
#endif // __AVX__
                output0++;
                output1++;
                output2++;
                output3++;
                output4++;
                output5++;
                output6++;
                output7++;
            }
        }

        // --- 4 output channels at a time ---
        nn_outch = (outch - remain_outch_start) >> 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int i = remain_outch_start + pp * 4;

            float* output0 = top_blob.channel(i);
            float* output1 = top_blob.channel(i+1);
            float* output2 = top_blob.channel(i+2);
            float* output3 = top_blob.channel(i+3);

            const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
            const float* biasptr = bias ? bias + i : zeros;

            int j=0;
            for (; j+7<N; j=j+8)
            {
                const float* vb = bottom_tm.channel(j/8);
                const float* va = kernel_tm.channel(i/8 + (i%8)/4);

#if __AVX__
                __m256 _sum0 = _mm256_broadcast_ss(biasptr);
                __m256 _sum1 = _mm256_broadcast_ss(biasptr+1);
                __m256 _sum2 = _mm256_broadcast_ss(biasptr+2);
                __m256 _sum3 = _mm256_broadcast_ss(biasptr+3);

                int k=0;
                for (; k+3<L; k=k+4)
                {
                    // k0
                    __m256 _va0 = _mm256_broadcast_ss(va);
                    __m256 _va1 = _mm256_broadcast_ss(va+1);
                    __m256 _va2 = _mm256_broadcast_ss(va+2);
                    __m256 _va3 = _mm256_broadcast_ss(va+3);
                    __m256 _vb0 = _mm256_loadu_ps(vb);
                    __m256 _vb1 = _mm256_loadu_ps(vb+8);
                    __m256 _vb2 = _mm256_loadu_ps(vb+16);
                    __m256 _vb3 = _mm256_loadu_ps(vb+24);

                    _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
                    _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
                    _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
                    _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30

                    va += 4;

                    // k1
                    _va0 = _mm256_broadcast_ss(va);
                    _va1 = _mm256_broadcast_ss(va+1);
                    _va2 = _mm256_broadcast_ss(va+2);
                    _va3 = _mm256_broadcast_ss(va+3);

                    _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01
                    _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11
                    _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21
                    _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31

                    va += 4;

                    // k2
                    _va0 = _mm256_broadcast_ss(va);
                    _va1 = _mm256_broadcast_ss(va+1);
                    _va2 = _mm256_broadcast_ss(va+2);
                    _va3 = _mm256_broadcast_ss(va+3);

                    _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02
                    _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12
                    _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22
                    _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32

                    va += 4;

                    // k3
                    _va0 = _mm256_broadcast_ss(va);
                    _va1 = _mm256_broadcast_ss(va+1);
                    _va2 = _mm256_broadcast_ss(va+2);
                    _va3 = _mm256_broadcast_ss(va+3);

                    _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03
                    _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13
                    _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23
                    _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33

                    va += 4;
                    vb += 32;
                }

                for (; k<L; k++)
                {
                    // k0
                    __m256 _va0 = _mm256_broadcast_ss(va);
                    __m256 _va1 = _mm256_broadcast_ss(va+1);
                    __m256 _va2 = _mm256_broadcast_ss(va+2);
                    __m256 _va3 = _mm256_broadcast_ss(va+3);
                    __m256 _vb0 = _mm256_loadu_ps(vb);

                    _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
                    _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
                    _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
                    _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30

                    va += 4;
                    vb += 4;
                }

                _mm256_storeu_ps(output0, _sum0);
                _mm256_storeu_ps(output1, _sum1);
                _mm256_storeu_ps(output2, _sum2);
                _mm256_storeu_ps(output3, _sum3);
#else
                // scalar fallback: 4x8 tile
                float sum0[8] = {0};
                float sum1[8] = {0};
                float sum2[8] = {0};
                float sum3[8] = {0};

                int k=0;
                for (; k+7<L; k=k+8)
                {
                    for (int n=0; n<8; n++)
                    {
                        sum0[n] += va[0] * vb[n];
                        sum1[n] += va[1] * vb[n];
                        sum2[n] += va[2] * vb[n];
                        sum3[n] += va[3] * vb[n];
                        va += 4;

                        sum0[n] += va[0] * vb[n+8];
                        sum1[n] += va[1] * vb[n+8];
                        sum2[n] += va[2] * vb[n+8];
                        sum3[n] += va[3] * vb[n+8];
                        va += 4;

                        sum0[n] += va[0] * vb[n+16];
                        sum1[n] += va[1] * vb[n+16];
                        sum2[n] += va[2] * vb[n+16];
                        sum3[n] += va[3] * vb[n+16];
                        va += 4;

                        sum0[n] += va[0] * vb[n+24];
                        sum1[n] += va[1] * vb[n+24];
                        sum2[n] += va[2] * vb[n+24];
                        sum3[n] += va[3] * vb[n+24];
                        va += 4;

                        sum0[n] += va[0] * vb[n+32];
                        sum1[n] += va[1] * vb[n+32];
                        sum2[n] += va[2] * vb[n+32];
                        sum3[n] += va[3] * vb[n+32];
                        va += 4;

                        sum0[n] += va[0] * vb[n+40];
                        sum1[n] += va[1] * vb[n+40];
                        sum2[n] += va[2] * vb[n+40];
                        sum3[n] += va[3] * vb[n+40];
                        va += 4;

                        sum0[n] += va[0] * vb[n+48];
                        sum1[n] += va[1] * vb[n+48];
                        sum2[n] += va[2] * vb[n+48];
                        sum3[n] += va[3] * vb[n+48];
                        va += 4;

                        sum0[n] += va[0] * vb[n+56];
                        sum1[n] += va[1] * vb[n+56];
                        sum2[n] += va[2] * vb[n+56];
                        sum3[n] += va[3] * vb[n+56];
                        // rewind va so the next n replays the same 8 reduction steps
                        va -= 28;
                    }

                    va += 32;
                    vb += 64;
                }

                for (; k<L; k++)
                {
                    for (int n=0; n<8; n++)
                    {
                        sum0[n] += va[0] * vb[n];
                        sum1[n] += va[1] * vb[n];
                        sum2[n] += va[2] * vb[n];
                        sum3[n] += va[3] * vb[n];
                    }

                    va += 4;
                    vb += 8;
                }

                for (int n=0; n<8; n++)
                {
                    output0[n] = sum0[n] + biasptr[0];
                    output1[n] = sum1[n] + biasptr[1];
                    output2[n] = sum2[n] + biasptr[2];
                    output3[n] = sum3[n] + biasptr[3];
                }
#endif // __AVX__
                output0 += 8;
                output1 += 8;
                output2 += 8;
                output3 += 8;
            }

            // remaining single output columns for this 4-channel group
            for (; j<N; j++)
            {
                const float* vb = bottom_tm.channel(j/8 + j%8);
                const float* va = kernel_tm.channel(i/8 + (i%8)/4);

#if __AVX__
                __m128 _sum0_3 = _mm_loadu_ps(biasptr);
                __m128 _sum0 = _mm_set1_ps(0.0);
                __m128 _sum1 = _mm_set1_ps(0.0);
                __m128 _sum2 = _mm_set1_ps(0.0);
                __m128 _sum3 = _mm_set1_ps(0.0);

                int k=0;
                for (; k+3<L; k=k+4)
                {
                    __m128 _vb0 = _mm_set1_ps(vb[0]);
                    __m128 _vb1 = _mm_set1_ps(vb[1]);
                    __m128 _vb2 = _mm_set1_ps(vb[2]);
                    __m128 _vb3 = _mm_set1_ps(vb[3]);
                    __m128 _va0 = _mm_loadu_ps(va);
                    __m128 _va1 = _mm_loadu_ps(va+4);
                    __m128 _va2 = _mm_loadu_ps(va+8);
                    __m128 _va3 = _mm_loadu_ps(va+12);

                    _sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k30) * a00
                    _sum1 = _mm_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k31) * a10
                    _sum2 = _mm_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k32) * a20
                    _sum3 = _mm_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k33) * a30

                    va += 16;
                    vb += 4;
                }

                // collapse the four partial accumulators into the bias register
                _sum0 = _mm_add_ps(_sum0, _sum1);
                _sum2 = _mm_add_ps(_sum2, _sum3);
                _sum0_3 = _mm_add_ps(_sum0_3, _sum0);
                _sum0_3 = _mm_add_ps(_sum0_3, _sum2);

                for (; k<L; k++)
                {
                    __m128 _vb0 = _mm_set1_ps(vb[0]);
                    __m128 _va = _mm_loadu_ps(va);

                    _sum0_3 = _mm_fmadd_ps(_va, _vb0, _sum0_3); // sum0 += (k00-k30) * a00

                    va += 4;
                    vb += 1;
                }

                float output_sum0_3[4] = {0.f};
                _mm_storeu_ps(output_sum0_3, _sum0_3);
                output0[0] = output_sum0_3[0];
                output1[0] = output_sum0_3[1];
                output2[0] = output_sum0_3[2];
                output3[0] = output_sum0_3[3];
#else
                float sum0 = biasptr[0];
                float sum1 = biasptr[1];
                float sum2 = biasptr[2];
                float sum3 = biasptr[3];

                for (int k=0; k<L; k++)
                {
                    sum0 += va[0] * vb[0];
                    sum1 += va[1] * vb[0];
                    sum2 += va[2] * vb[0];
                    sum3 += va[3] * vb[0];

                    va += 4;
                    vb += 1;
                }

                output0[0] = sum0;
                output1[0] = sum1;
                output2[0] = sum2;
                output3[0] = sum3;
#endif // __AVX__
                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        // --- remaining single output channels ---
        remain_outch_start += nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_outch_start; i<outch; i++)
        {
            float* output = top_blob.channel(i);

            const float bias0 = bias ? bias[i] : 0.f;

            int j=0;
            for (; j+7<N; j=j+8)
            {
                const float* vb = bottom_tm.channel(j/8);
                const float* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4);

#if __AVX__
                __m256 _sum0 = _mm256_broadcast_ss(&bias0);

                int k=0;
                for (; k+3<L; k=k+4)
                {
                    // k0
                    __m256 _va0 = _mm256_broadcast_ss(va);
                    __m256 _va1 = _mm256_broadcast_ss(va+1);
                    __m256 _va2 = _mm256_broadcast_ss(va+2);
                    __m256 _va3 = _mm256_broadcast_ss(va+3);
                    __m256 _vb0 = _mm256_loadu_ps(vb);
                    __m256 _vb1 = _mm256_loadu_ps(vb+8);
                    __m256 _vb2 = _mm256_loadu_ps(vb+16);
                    __m256 _vb3 = _mm256_loadu_ps(vb+24);

                    _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
                    _sum0 = _mm256_fmadd_ps(_vb1, _va1, _sum0); // sum0 += (a10-a17) * k01
                    _sum0 = _mm256_fmadd_ps(_vb2, _va2, _sum0); // sum0 += (a20-a27) * k02
                    _sum0 = _mm256_fmadd_ps(_vb3, _va3, _sum0); // sum0 += (a30-a37) * k03

                    va += 4;
                    vb += 32;
                }

                for (; k<L; k++)
                {
                    // k0
                    __m256 _va0 = _mm256_broadcast_ss(va);
                    __m256 _vb0 = _mm256_loadu_ps(vb);

                    _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00

                    va += 1;
                    vb += 4;
                }

                _mm256_storeu_ps(output, _sum0);
#else
                float sum[8] = {0};

                int k=0;
                for (; k+7<L; k=k+8)
                {
                    for (int n=0; n<8; n++)
                    {
                        sum[n] += va[0] * vb[n];
                        sum[n] += va[1] * vb[n+8];
                        sum[n] += va[2] * vb[n+16];
                        sum[n] += va[3] * vb[n+24];
                        sum[n] += va[4] * vb[n+32];
                        sum[n] += va[5] * vb[n+40];
                        sum[n] += va[6] * vb[n+48];
                        sum[n] += va[7] * vb[n+56];
                    }

                    va += 8;
                    vb += 64;
                }

                for (; k<L; k++)
                {
                    for (int n=0; n<8; n++)
                    {
                        sum[n] += va[0] * vb[n];
                    }

                    va += 1;
                    vb += 8;
                }

                for (int n=0; n<8; n++)
                {
                    output[n] = sum[n] + bias0;
                }
#endif // __AVX__
                output += 8;
            }

            for (; j<N; j++)
            {
                const float* vb = bottom_tm.channel(j/8 + j%8);
                const float* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4);

                int k=0;
#if __AVX__
                __m128 _sum0 = _mm_set1_ps(0.f);

                for (; k+3<L; k+=4)
                {
                    __m128 _p0 = _mm_loadu_ps(vb);
                    vb += 4;
                    __m128 _k0 = _mm_loadu_ps(va);
                    va += 4;
                    _sum0 = _mm_fmadd_ps(_p0, _k0, _sum0);
                }
                float output_sum0[4] = {0.f};
                _mm_storeu_ps(output_sum0, _sum0);
                // horizontal add of the 4 partial sums
                float sum0 = bias0 + output_sum0[0] + output_sum0[1] + output_sum0[2] + output_sum0[3];
#else
                float sum0 = bias0;
#endif // __AVX__
                for (; k<L; k++)
                {
                    sum0 += va[0] * vb[0];

                    va += 1;
                    vb += 1;
                }

                output[0] = sum0;

                output++;
            }
        }
    }
}

#else
// Non-AVX variant: kernel repacked in blocks of 4 output channels, then singles.
static void conv_im2col_sgemm_transform_kernel_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size)
{
    const float* kernel = _kernel;

    // kernel memory packed 4 x 4
    kernel_tm.create(4*kernel_size, inch, outch/4 + outch%4);

    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = outch >> 2;
    remain_outch_start = nn_outch << 2;

    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 4;

        const float* k0 = kernel + (p+0)*inch*kernel_size;
        const float* k1 = kernel + (p+1)*inch*kernel_size;
        const float* k2 = kernel + (p+2)*inch*kernel_size;
        const float* k3 = kernel + (p+3)*inch*kernel_size;

        float* ktmp = kernel_tm.channel(p/4);

        for (int q=0; q<inch*kernel_size; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }

    for (int p=remain_outch_start; p<outch; p++)
    {
        const float* k0 = kernel + (p+0)*inch*kernel_size;

        float* ktmp = kernel_tm.channel(p/4 + p%4);

        for (int q=0; q<inch*kernel_size; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}

// im2col + sgemm convolution, non-AVX variant (continues past this chunk).
static void conv_im2col_sgemm_sse(const Mat &bottom_blob, Mat &top_blob, const Mat & kernel_tm, const Mat& _bias, \
    const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* bias = _bias;

    // im2col
    Mat bottom_im2col(outw*outh, kernel_h*kernel_w*inch, elemsize, opt.workspace_allocator);
    {
        const int stride = kernel_h*kernel_w*outw*outh;
        float* ret = (float*)bottom_im2col;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=0; p<inch; p++)
        {
            const float* input = bottom_blob.channel(p);
            int retID = stride * p;
            for (int u=0;
u<kernel_h; u++) { for (int v=0; v<kernel_w; v++) { for (int i=0; i<outh; i++) { for (int j=0; j<outw; j++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; // bottom_im2col memory packed 4 x 4 Mat bottom_tm(4*kernel_size, inch, out_size/4 + out_size%4, elemsize, opt.workspace_allocator); { int nn_size = out_size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = ii * 4; const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i/4); for (int q=0; q<inch*kernel_size; q++) { #if __SSE__ _mm_storeu_ps(tmpptr, _mm_loadu_ps(img0)); #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; #endif // __SSE__ tmpptr += 4; img0 += out_size; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<out_size; i++) { const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i/4 + i%4); for (int q=0; q<inch*kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += out_size; } } } // sgemm(int M, int N, int L, float* A, float* B, float* C) { //int M = outch; // outch int N = outw * outh; // outsize or out stride int L = kernel_w * kernel_h * inch; // ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int i = pp * 4; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i+1); float* output2 = top_blob.channel(i+2); float* output3 = top_blob.channel(i+3); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + i : zeros; int j=0; for (; j+3<N; j=j+4) { const float* vb = bottom_tm.channel(j/4); const float* va = kernel_tm.channel(i/4); #if __SSE__ __m128 _sum0 = _mm_set1_ps(biasptr[0]); __m128 _sum1 = _mm_set1_ps(biasptr[1]); __m128 _sum2 = _mm_set1_ps(biasptr[2]); __m128 _sum3 = _mm_set1_ps(biasptr[3]); int k=0; for (; k+3<L; k=k+4) { // k0 __m128 _vb = _mm_loadu_ps(vb); __m128 _va0 = _mm_set1_ps(va[0]); __m128 _va1 = _mm_set1_ps(va[1]); __m128 _va2 = _mm_set1_ps(va[2]); __m128 _va3 = _mm_set1_ps(va[3]); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a00-a03) * k00 _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a00-a03) * k10 _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a00-a03) * k20 _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a00-a03) * k30 // k1 _vb = _mm_loadu_ps(vb+4); _va0 = _mm_set1_ps(va[4]); _va1 = _mm_set1_ps(va[5]); _va2 = _mm_set1_ps(va[6]); _va3 = _mm_set1_ps(va[7]); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a10-a13) * k01 _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a10-a13) * k11 _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a10-a13) * k21 _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a10-a13) * k31 // k2 _vb = _mm_loadu_ps(vb+8); _va0 = _mm_set1_ps(va[8]); _va1 = _mm_set1_ps(va[9]); _va2 = _mm_set1_ps(va[10]); _va3 = _mm_set1_ps(va[11]); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a20-a23) * k02 _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a20-a23) * k12 _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a20-a23) * k22 _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a20-a23) * k32 // k3 _vb = _mm_loadu_ps(vb+12); _va0 = _mm_set1_ps(va[12]); _va1 = _mm_set1_ps(va[13]); _va2 = _mm_set1_ps(va[14]); _va3 = _mm_set1_ps(va[15]); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a30-a33) * k03 _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a30-a33) * k13 _sum2 = 
_mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a30-a33) * k23 _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a30-a33) * k33 va += 16; vb += 16; } for (; k<L; k++) { // k0 __m128 _vb = _mm_loadu_ps(vb); __m128 _va0 = _mm_set1_ps(va[0]); __m128 _va1 = _mm_set1_ps(va[1]); __m128 _va2 = _mm_set1_ps(va[2]); __m128 _va3 = _mm_set1_ps(va[3]); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a00-a03) * k00 _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a00-a03) * k10 _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a00-a03) * k20 _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a00-a03) * k30 va += 4; vb += 4; } _mm_storeu_ps(output0, _sum0); _mm_storeu_ps(output1, _sum1); _mm_storeu_ps(output2, _sum2); _mm_storeu_ps(output3, _sum3); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<4; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; va += 4; sum0[n] += va[0] * vb[n+4]; sum1[n] += va[1] * vb[n+4]; sum2[n] += va[2] * vb[n+4]; sum3[n] += va[3] * vb[n+4]; va += 4; sum0[n] += va[0] * vb[n+8]; sum1[n] += va[1] * vb[n+8]; sum2[n] += va[2] * vb[n+8]; sum3[n] += va[3] * vb[n+8]; va += 4; sum0[n] += va[0] * vb[n+12]; sum1[n] += va[1] * vb[n+12]; sum2[n] += va[2] * vb[n+12]; sum3[n] += va[3] * vb[n+12]; va += 4; sum0[n] += va[0] * vb[n+16]; sum1[n] += va[1] * vb[n+16]; sum2[n] += va[2] * vb[n+16]; sum3[n] += va[3] * vb[n+16]; va += 4; sum0[n] += va[0] * vb[n+20]; sum1[n] += va[1] * vb[n+20]; sum2[n] += va[2] * vb[n+20]; sum3[n] += va[3] * vb[n+20]; va += 4; sum0[n] += va[0] * vb[n+24]; sum1[n] += va[1] * vb[n+24]; sum2[n] += va[2] * vb[n+24]; sum3[n] += va[3] * vb[n+24]; va += 4; sum0[n] += va[0] * vb[n+28]; sum1[n] += va[1] * vb[n+28]; sum2[n] += va[2] * vb[n+28]; sum3[n] += va[3] * vb[n+28]; va -= 28; } va += 32; vb += 32; } for (; k<L; k++) { for (int n=0; n<4; n++) { 
sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 4; } for (int n=0; n<4; n++) { output0[n] = sum0[n] + biasptr[0]; output1[n] = sum1[n] + biasptr[1]; output2[n] = sum2[n] + biasptr[2]; output3[n] = sum3[n] + biasptr[3]; } #endif // __SSE__ output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j<N; j++) { const float* vb = bottom_tm.channel(j/4 + j%4); const float* va = kernel_tm.channel(i/4); #if __SSE__ __m128 _sum0_3 = _mm_loadu_ps(biasptr); __m128 _sum0 = _mm_set1_ps(0.0); __m128 _sum1 = _mm_set1_ps(0.0); __m128 _sum2 = _mm_set1_ps(0.0); __m128 _sum3 = _mm_set1_ps(0.0); int k=0; for (; k+3<L; k=k+4) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _vb1 = _mm_set1_ps(vb[1]); __m128 _vb2 = _mm_set1_ps(vb[2]); __m128 _vb3 = _mm_set1_ps(vb[3]); __m128 _va0 = _mm_loadu_ps(va); __m128 _va1 = _mm_loadu_ps(va+4); __m128 _va2 = _mm_loadu_ps(va+8); __m128 _va3 = _mm_loadu_ps(va+12); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));// sum0 += (k00-k30) * a00 _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va1, _vb1));// sum1 += (k01-k31) * a10 _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va2, _vb2));// sum2 += (k02-k32) * a20 _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va3, _vb3));// sum3 += (k03-k33) * a30 va += 16; vb += 4; } _sum0 = _mm_add_ps(_sum0, _sum1); _sum2 = _mm_add_ps(_sum2, _sum3); _sum0_3 = _mm_add_ps(_sum0_3, _sum0); _sum0_3 = _mm_add_ps(_sum0_3, _sum2); for (; k<L; k++) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _va = _mm_loadu_ps(va); _sum0_3 = _mm_add_ps(_sum0_3, _mm_mul_ps(_va, _vb0));// sum0 += (k00-k30) * a00 va += 4; vb += 1; } output0[0] = _sum0_3.m128_f32[0]; output1[0] = _sum0_3.m128_f32[1]; output2[0] = _sum0_3.m128_f32[2]; output3[0] = _sum0_3.m128_f32[3]; #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; for (int k=0; k<L; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 
1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif // __SSE__ output0++; output1++; output2++; output3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_outch_start; i<outch; i++) { float* output = top_blob.channel(i); const float bias0 = bias ? bias[i] : 0.f; int j=0; for (; j+3<N; j=j+4) { const float* vb = bottom_tm.channel(j/4); const float* va = kernel_tm.channel(i/4 + i%4); #if __SSE__ __m128 _sum0 = _mm_set1_ps(bias0); int k=0; for (; k+3<L; k=k+4) { // k0 __m128 _va0 = _mm_set1_ps(va[0]); __m128 _va1 = _mm_set1_ps(va[1]); __m128 _va2 = _mm_set1_ps(va[2]); __m128 _va3 = _mm_set1_ps(va[3]); __m128 _vb0 = _mm_loadu_ps(vb); __m128 _vb1 = _mm_loadu_ps(vb+4); __m128 _vb2 = _mm_loadu_ps(vb+8); __m128 _vb3 = _mm_loadu_ps(vb+12); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb0, _va0));// sum0 = (a00-a03) * k00 _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb1, _va1));// sum0 += (a10-a13) * k01 _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb2, _va2));// sum0 += (a20-a23) * k02 _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb3, _va3));// sum0 += (a30-a33) * k03 va += 4; vb += 16; } for (; k<L; k++) { // k0 __m128 _va0 = _mm_set1_ps(va[0]); __m128 _vb0 = _mm_loadu_ps(vb); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb0, _va0)); // sum0 = (a00-a03) * k00 va += 1; vb += 4; } _mm_storeu_ps(output, _sum0); #else float sum[4] = {0}; int k=0; for (; k+3<L; k=k+4) { for (int n=0; n<4; n++) { sum[n] += va[0] * vb[n]; sum[n] += va[1] * vb[n+4]; sum[n] += va[2] * vb[n+8]; sum[n] += va[3] * vb[n+12]; //sum[n] += va[4] * vb[n+16]; //sum[n] += va[5] * vb[n+20]; //sum[n] += va[6] * vb[n+24]; //sum[n] += va[7] * vb[n+28]; } va += 4; vb += 16; } for (; k<L; k++) { for (int n=0; n<4; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 4; } for (int n=0; n<4; n++) { output[n] = sum[n] + bias0; } #endif // __SSE__ output += 4; } for (; j<N; j++) { const float* vb = bottom_tm.channel(j/4 + j%4); const float* va = kernel_tm.channel(i/4 + i%4); int k=0; #if 
__SSE__ __m128 _sum0 = _mm_set1_ps(0.f); for (; k+3<L; k+=4) { __m128 _p0 = _mm_loadu_ps(vb); __m128 _k0 = _mm_loadu_ps(va); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0)); va += 4; vb += 4; } float sum0 = bias0 + _sum0.m128_f32[0] + _sum0.m128_f32[1] + _sum0.m128_f32[2] + _sum0.m128_f32[3]; #else float sum0 = bias0; #endif // __SSE__ for (; k<L; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } } #endif
lai.c
#include<stdio.h> #include "gdal.h" #include<omp.h> #include "cpl_string.h" /* mcd15A3 MODLAND_QC bits [0-1] * 0 -> class 0: LAI produced, Good quality (main algorithm with or without saturation) * 1 -> class 1: LAI produced, Other Quality (back-up algorithm or fill values) */ #define NODATA 255 #define Null 1000000000 int mcd15A3a(int pixel) { return (pixel & 0x01); } void usage() { printf( "-----------------------------------------\n"); printf( "--Modis Processing chain--OpenMP code----\n"); printf( "-----------------------------------------\n"); printf( "./lai inLAI inLAI_QA\n"); printf( "\toutLAI\n"); printf( "\t[Offset Scale]\n"); printf( "-----------------------------------------\n"); printf( "inLAI\t\tModis MCD15A3 LAI 1000m\n"); printf( "inLAI_QA\t\tModis MCD15A3 FparLai_QC\n"); printf( "outLAI\tQA corrected LAI output [-]\n"); printf( "Offset\t Optional offset (DN2LAI)\n"); printf( "Scale\t Optional scale (DN2LAI)\n"); return; } int main( int argc, char *argv[] ) { if( argc < 4 ) { usage(); return 1; } char *inB2 = argv[1]; //LAI char *inB3 = argv[2]; //LAI_QA char *laiF = argv[3]; // Corrected LAI float offset=Null, scale=Null; if(argv[4] != NULL && argv[5] != NULL){ offset = atof(argv[4]); // Optional Offset (offset+DN*scale) scale = atof(argv[5]); // Optional scale (offset+DN*scale) } GDALAllRegister(); GDALDatasetH hD2 = GDALOpen(inB2,GA_ReadOnly);//LAI GDALDatasetH hD3 = GDALOpen(inB3,GA_ReadOnly);//LAI_QA if(hD2==NULL||hD3==NULL){ printf("One or more input files "); printf("could not be loaded\n"); exit(1); } GDALDriverH hDr2 = GDALGetDatasetDriver(hD2); char **options = NULL; //options = CSLSetNameValue( options, "TILED", "YES" ); //options = CSLSetNameValue( options, "COMPRESS", "DEFLATE" ); //options = CSLSetNameValue( options, "PREDICTOR", "2" ); GDALDatasetH hDOut = GDALCreateCopy(hDr2,laiF,hD2,FALSE,options,NULL,NULL); GDALRasterBandH hBOut = GDALGetRasterBand(hDOut,1); GDALSetRasterNoDataValue(hBOut, NODATA); GDALRasterBandH hB2 = 
GDALGetRasterBand(hD2,1);//LAI GDALRasterBandH hB3 = GDALGetRasterBand(hD3,1);//LAI_QA int nX = GDALGetRasterBandXSize(hB2); int nY = GDALGetRasterBandYSize(hB2); int N=nX*nY; float *l2 = (float *) malloc(sizeof(float)*N); float *l3 = (float *) malloc(sizeof(float)*N); float *lOut = (float *) malloc(sizeof(float)*N); int rc, qa; //LAI 1Km int err = 0; err=GDALRasterIO(hB2,GF_Read,0,0,nX,nY,l2,nX,nY,GDT_Float32,0,0); //LAI_QA 1Km err=GDALRasterIO(hB3,GF_Read,0,0,nX,nY,l3,nX,nY,GDT_Float32,0,0); #pragma omp parallel for default(none) \ private (rc, qa) shared (N, l2, l3, lOut, offset, scale) for(rc=0;rc<N;rc++){ qa=mcd15A3a(l3[rc]); if( qa != 0) lOut[rc] = NODATA; if(offset!=Null && scale!=Null){ lOut[rc] = offset + l2[rc] * scale; } else lOut[rc] = l2[rc]; } #pragma omp barrier err=GDALRasterIO(hBOut,GF_Write,0,0,nX,nY,lOut,nX,nY,GDT_Float32,0,0); err=err+1; if( l2 != NULL ) free( l2 ); if( l3 != NULL ) free( l3 ); GDALClose(hD2); GDALClose(hD3); GDALClose(hDOut); return(EXIT_SUCCESS); }
deprecate.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE % % D D E P P R R E C A A T E % % D D EEE PPPPP RRRR EEE C AAAAA T EEE % % D D E P R R E C A A T E % % DDDD EEEEE P R R EEEEE CCCC A A T EEEEE % % % % % % MagickWand Deprecated Methods % % % % Software Design % % John Cristy % % October 2002 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "wand/studio.h" #include "wand/MagickWand.h" #include "wand/magick-wand-private.h" #include "wand/wand.h" #include "magick/monitor-private.h" #include "magick/thread-private.h" /* Define declarations. */ #define PixelViewId "PixelView" #define ThrowWandException(severity,tag,context) \ { \ (void) ThrowMagickException(wand->exception,GetMagickModule(),severity, \ tag,"`%s'",context); \ return(MagickFalse); \ } /* Typedef declarations. 
*/ struct _PixelView { size_t id; char name[MaxTextExtent]; ExceptionInfo *exception; MagickWand *wand; CacheView *view; RectangleInfo region; size_t number_threads; PixelWand ***pixel_wands; MagickBooleanType debug; size_t signature; }; #if !defined(MAGICKCORE_EXCLUDE_DEPRECATED) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k A v e r a g e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickAverageImages() average a set of images. % % The format of the MagickAverageImages method is: % % MagickWand *MagickAverageImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ static MagickWand *CloneMagickWandFromImages(const MagickWand *wand, Image *images) { MagickWand *clone_wand; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); clone_wand=(MagickWand *) AcquireMagickMemory(sizeof(*clone_wand)); if (clone_wand == (MagickWand *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", images->filename); (void) ResetMagickMemory(clone_wand,0,sizeof(*clone_wand)); clone_wand->id=AcquireWandId(); (void) FormatLocaleString(clone_wand->name,MaxTextExtent,"%s-%.20g", MagickWandId,(double) clone_wand->id); clone_wand->exception=AcquireExceptionInfo(); InheritException(clone_wand->exception,wand->exception); clone_wand->image_info=CloneImageInfo(wand->image_info); clone_wand->quantize_info=CloneQuantizeInfo(wand->quantize_info); clone_wand->images=images; clone_wand->debug=IsEventLogging(); if (clone_wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_wand->name); clone_wand->signature=WandSignature; return(clone_wand); } WandExport MagickWand *MagickAverageImages(MagickWand *wand) { Image *average_image; assert(wand != 
(MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); average_image=EvaluateImages(wand->images,MeanEvaluateOperator, wand->exception); if (average_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,average_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelView() makes a copy of the specified pixel view. % % The format of the ClonePixelView method is: % % PixelView *ClonePixelView(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport PixelView *ClonePixelView(const PixelView *pixel_view) { PixelView *clone_view; register ssize_t i; assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); if (pixel_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name); clone_view=(PixelView *) AcquireMagickMemory(sizeof(*clone_view)); if (clone_view == (PixelView *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", pixel_view->name); (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view)); clone_view->id=AcquireWandId(); (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g", PixelViewId,(double) clone_view->id); clone_view->exception=AcquireExceptionInfo(); InheritException(clone_view->exception,pixel_view->exception); clone_view->view=CloneCacheView(pixel_view->view); clone_view->region=pixel_view->region; clone_view->number_threads=pixel_view->number_threads; for (i=0; i < (ssize_t) pixel_view->number_threads; i++) clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **) 
pixel_view->pixel_wands[i],pixel_view->region.width); clone_view->debug=pixel_view->debug; if (clone_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name); clone_view->signature=WandSignature; return(clone_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelView() deallocates memory associated with a pixel view. % % The format of the DestroyPixelView method is: % % PixelView *DestroyPixelView(PixelView *pixel_view, % const size_t number_wands,const size_t number_threads) % % A description of each parameter follows: % % o pixel_view: the pixel view. % % o number_wand: the number of pixel wands. % % o number_threads: number of threads. % */ static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands, const size_t number_wands,const size_t number_threads) { register ssize_t i; assert(pixel_wands != (PixelWand ***) NULL); for (i=0; i < (ssize_t) number_threads; i++) if (pixel_wands[i] != (PixelWand **) NULL) pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands); pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands); return(pixel_wands); } WandExport PixelView *DestroyPixelView(PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); pixel_view->pixel_wands=DestroyPixelsThreadSet(pixel_view->pixel_wands, pixel_view->region.width,pixel_view->number_threads); pixel_view->view=DestroyCacheView(pixel_view->view); pixel_view->exception=DestroyExceptionInfo(pixel_view->exception); pixel_view->signature=(~WandSignature); RelinquishWandId(pixel_view->id); pixel_view=(PixelView *) RelinquishMagickMemory(pixel_view); return(pixel_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D u p l e x T r a n s f e 
r P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DuplexTransferPixelViewIterator() iterates over three pixel views in % parallel and calls your transfer method for each scanline of the view. The % source and duplex pixel region is not confined to the image canvas-- that is % you can include negative offsets or widths or heights that exceed the image % dimension. However, the destination pixel view is confined to the image % canvas-- that is no negative offsets or widths or heights that exceed the % image dimension are permitted. % % Use this pragma: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the DuplexTransferPixelViewIterator method is: % % MagickBooleanType DuplexTransferPixelViewIterator(PixelView *source, % PixelView *duplex,PixelView *destination, % DuplexTransferPixelViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o duplex: the duplex pixel view. % % o destination: the destination pixel view. % % o transfer: the transfer callback method. % % o context: the user defined context. 
%
*/

/*
  DuplexTransferPixelViewIterator() iterates over three pixel views in
  parallel and calls the transfer callback once per scanline.  The source and
  duplex regions are read as virtual pixels (so they may extend beyond the
  canvas); the destination is written as authentic pixels and must lie on the
  canvas.  A callback that needs single-threaded sections must use
  "#pragma omp critical".  Returns MagickTrue unless any scanline fails.
*/
WandExport MagickBooleanType DuplexTransferPixelViewIterator(
  PixelView *source,PixelView *duplex,PixelView *destination,
  DuplexTransferPixelViewMethod transfer,void *context)
{
#define DuplexTransferPixelViewTag  "PixelView/DuplexTransfer"

  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *duplex_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (DuplexTransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  duplex_image=duplex->wand->images;
  destination_image=destination->wand->images;
  /* Writing pixels requires a DirectClass destination. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict duplex_indexes,
      *restrict indexes;

    register const PixelPacket
      *restrict duplex_pixels,
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    /* "status" is a sticky failure flag: once any scanline fails, the
       remaining iterations fall through without doing work. */
    if (status == MagickFalse)
      continue;
    /* Load the source scanline into this thread's row of pixel wands. */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* Load the duplex scanline the same way. */
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->region.x,y,
      duplex->region.width,1,duplex->exception);
    if (duplex_pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
    for (x=0; x < (ssize_t) duplex->region.width; x++)
      PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
    if (duplex_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) duplex->region.width; x++)
        PixelSetBlackQuantum(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    if (duplex_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) duplex->region.width; x++)
        PixelSetIndex(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    /* Load the destination scanline (authentic: writable). */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    /* Hand all three wand rows to the user's callback. */
    if (transfer(source,duplex,destination,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the (possibly modified) destination wands back to the pixels. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_DuplexTransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,DuplexTransferPixelViewTag,
          progress++,source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
  GetPixelViewException() returns the severity, reason, and description of
  any error that occurs when utilizing a pixel view.
    pixel_view: the pixel view.
    severity: the severity of the error is returned here.
% */ WandExport char *GetPixelViewException(const PixelView *pixel_view, ExceptionType *severity) { char *description; assert(pixel_view != (const PixelView *) NULL); assert(pixel_view->signature == WandSignature); if (pixel_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name); assert(severity != (ExceptionType *) NULL); *severity=pixel_view->exception->severity; description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent, sizeof(*description)); if (description == (char *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", pixel_view->name); *description='\0'; if (pixel_view->exception->reason != (char *) NULL) (void) CopyMagickString(description,GetLocaleExceptionMessage( pixel_view->exception->severity,pixel_view->exception->reason), MaxTextExtent); if (pixel_view->exception->description != (char *) NULL) { (void) ConcatenateMagickString(description," (",MaxTextExtent); (void) ConcatenateMagickString(description,GetLocaleExceptionMessage( pixel_view->exception->severity,pixel_view->exception->description), MaxTextExtent); (void) ConcatenateMagickString(description,")",MaxTextExtent); } return(description); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w H e i g h t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewHeight() returns the pixel view height. % % The format of the GetPixelViewHeight method is: % % size_t GetPixelViewHeight(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. 
% */ WandExport size_t GetPixelViewHeight(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.height); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewIterator() iterates over the pixel view in parallel and calls % your get method for each scanline of the view. The pixel region is % not confined to the image canvas-- that is you can include negative offsets % or widths or heights that exceed the image dimension. Any updates to % the pixels in your callback are ignored. % % Use this pragma: % % #pragma omp critical % % to define a section of code in your callback get method that must be % executed by a single thread at a time. % % The format of the GetPixelViewIterator method is: % % MagickBooleanType GetPixelViewIterator(PixelView *source, % GetPixelViewMethod get,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o get: the get callback method. % % o context: the user defined context. 
%
*/
WandExport MagickBooleanType GetPixelViewIterator(PixelView *source,
  GetPixelViewMethod get,void *context)
{
#define GetPixelViewTag  "PixelView/Get"

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (get == (GetPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  /*
    NOTE(review): the loop bound compares y against region.height alone, so
    when region.y > 0 fewer than region.height rows are visited -- presumably
    region.height here already encodes the exclusive end row; confirm against
    how callers populate source->region before changing.
  */
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *pixels;

    register ssize_t
      x;

    /* A failure in any thread suppresses further work in the others. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /* Stage the scanline into this thread's private pixel wand row. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* Invoke the user callback for this scanline; updates are ignored. */
    if (get(source,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_GetPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,GetPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
% G e t P i x e l V i e w P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewPixels() returns the pixel view pixel_wands. % % The format of the GetPixelViewPixels method is: % % PixelWand *GetPixelViewPixels(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport PixelWand **GetPixelViewPixels(const PixelView *pixel_view) { const int id = GetOpenMPThreadId(); assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->pixel_wands[id]); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w W a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewWand() returns the magick wand associated with the pixel view. % % The format of the GetPixelViewWand method is: % % MagickWand *GetPixelViewWand(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport MagickWand *GetPixelViewWand(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->wand); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w W i d t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewWidth() returns the pixel view width. % % The format of the GetPixelViewWidth method is: % % size_t GetPixelViewWidth(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. 
% */ WandExport size_t GetPixelViewWidth(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.width); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w X % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewX() returns the pixel view x offset. % % The format of the GetPixelViewX method is: % % ssize_t GetPixelViewX(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport ssize_t GetPixelViewX(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.x); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w Y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewY() returns the pixel view y offset. % % The format of the GetPixelViewY method is: % % ssize_t GetPixelViewY(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport ssize_t GetPixelViewY(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.y); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPixelView() returns MagickTrue if the the parameter is verified as a pixel % view container. % % The format of the IsPixelView method is: % % MagickBooleanType IsPixelView(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. 
% */ WandExport MagickBooleanType IsPixelView(const PixelView *pixel_view) { size_t length; if (pixel_view == (const PixelView *) NULL) return(MagickFalse); if (pixel_view->signature != WandSignature) return(MagickFalse); length=strlen(PixelViewId); if (LocaleNCompare(pixel_view->name,PixelViewId,length) != 0) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k C l i p P a t h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickClipPathImage() clips along the named paths from the 8BIM profile, if % present. Later operations take effect inside the path. Id may be a number % if preceded with #, to work on a numbered path, e.g., "#1" to use the first % path. % % The format of the MagickClipPathImage method is: % % MagickBooleanType MagickClipPathImage(MagickWand *wand, % const char *pathname,const MagickBooleanType inside) % % A description of each parameter follows: % % o wand: the magick wand. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. % */ WandExport MagickBooleanType MagickClipPathImage(MagickWand *wand, const char *pathname,const MagickBooleanType inside) { return(MagickClipImagePath(wand,pathname,inside)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G e t F i l l A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGetFillAlpha() returns the alpha used when drawing using the fill % color or fill texture. Fully opaque is 1.0. 
%
%  The format of the DrawGetFillAlpha method is:
%
%      double DrawGetFillAlpha(const DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport double DrawGetFillAlpha(const DrawingWand *wand)
{
  /* Deprecated alias: delegates directly to DrawGetFillOpacity(). */
  return(DrawGetFillOpacity(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w G e t S t r o k e A l p h a                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGetStrokeAlpha() returns the alpha of stroked object outlines.
%
%  The format of the DrawGetStrokeAlpha method is:
%
%      double DrawGetStrokeAlpha(const DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport double DrawGetStrokeAlpha(const DrawingWand *wand)
{
  /* Deprecated alias: delegates directly to DrawGetStrokeOpacity(). */
  return(DrawGetStrokeOpacity(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P e e k G r a p h i c W a n d                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPeekGraphicWand() returns the current drawing wand.
%
%  The format of the PeekDrawingWand method is:
%
%      DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
{
  /* Deprecated alias: delegates directly to PeekDrawingWand(). */
  return(PeekDrawingWand(wand));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P o p G r a p h i c C o n t e x t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPopGraphicContext() destroys the current drawing wand and returns to the
%  previously pushed drawing wand.  Multiple drawing wands may exist.  It is an
%  error to attempt to pop more drawing wands than have been pushed, and it is
%  proper form to pop all drawing wands which have been pushed.
%
%  The format of the DrawPopGraphicContext method is:
%
%      void DrawPopGraphicContext(DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport void DrawPopGraphicContext(DrawingWand *wand)
{
  /* Deprecated alias: delegates to PopDrawingWand(); its status is ignored. */
  (void) PopDrawingWand(wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P u s h G r a p h i c C o n t e x t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPushGraphicContext() clones the current drawing wand to create a new
%  drawing wand.  The original drawing wand(s) may be returned to by
%  invoking PopDrawingWand().  The drawing wands are stored on a drawing wand
%  stack.  For every Pop there must have already been an equivalent Push.
%
%  The format of the DrawPushGraphicContext method is:
%
%      void DrawPushGraphicContext(DrawingWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
%
*/
WandExport void DrawPushGraphicContext(DrawingWand *wand)
{
  /* Deprecated alias: delegates to PushDrawingWand(); its status is ignored. */
  (void) PushDrawingWand(wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w S e t F i l l A l p h a                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawSetFillAlpha() sets the alpha to use when drawing using the fill
%  color or fill texture.  Fully opaque is 1.0.
%
%  The format of the DrawSetFillAlpha method is:
%
%      void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
%
%  A description of each parameter follows:
%
%    o wand: the drawing wand.
% % o fill_alpha: fill alpha % */ WandExport void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha) { DrawSetFillOpacity(wand,fill_alpha); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w S e t S t r o k e A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawSetStrokeAlpha() specifies the alpha of stroked object outlines. % % The format of the DrawSetStrokeAlpha method is: % % void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha) % % A description of each parameter follows: % % o wand: the drawing wand. % % o stroke_alpha: stroke alpha. The value 1.0 is opaque. % */ WandExport void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha) { DrawSetStrokeOpacity(wand,stroke_alpha); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k C o l o r F l o o d f i l l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickColorFloodfillImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % The format of the MagickColorFloodfillImage method is: % % MagickBooleanType MagickColorFloodfillImage(MagickWand *wand, % const PixelWand *fill,const double fuzz,const PixelWand *bordercolor, % const ssize_t x,const ssize_t y) % % A description of each parameter follows: % % o wand: the magick wand. % % o fill: the floodfill color pixel wand. % % o fuzz: By default target must match a particular pixel color % exactly. However, in many cases two colors may differ by a small amount. % The fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. 
For example, set fuzz to 10 and the
%      color red at intensities of 100 and 102 respectively are now interpreted
%      as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
  const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  DrawInfo
    *draw_info;

  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* The fill color rides in a temporary DrawInfo consumed by the core call. */
  draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL);
  PixelGetQuantumColor(fill,&draw_info->fill);
  /*
    Seed the target color from the pixel at (x,y), wrapped into the canvas.
    NOTE(review): columns/rows are unsigned, so a negative x or y is converted
    to a large unsigned value before the modulo -- presumably callers pass
    non-negative coordinates; confirm before relying on wrap-around here.
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  /* With a border color, flood until the border; otherwise match the target. */
  status=ColorFloodfillImage(wand->images,draw_info,target,x,y,
    bordercolor != (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k D e s c r i b e I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickDescribeImage() identifies an image by printing its attributes to the
%  file.  Attributes include the image width, height, size, and others.
%
%  The format of the MagickDescribeImage method is:
%
%      const char *MagickDescribeImage(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
% */ WandExport char *MagickDescribeImage(MagickWand *wand) { return(MagickIdentifyImage(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k F l a t t e n I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickFlattenImages() merges a sequence of images. This useful for % combining Photoshop layers into a single image. % % The format of the MagickFlattenImages method is: % % MagickWand *MagickFlattenImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ WandExport MagickWand *MagickFlattenImages(MagickWand *wand) { Image *flatten_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); flatten_image=FlattenImages(wand->images,wand->exception); if (flatten_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,flatten_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k G e t I m a g e A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageAttribute() returns a value associated with the specified % property. Use MagickRelinquishMemory() to free the value when you are % finished with it. % % The format of the MagickGetImageAttribute method is: % % char *MagickGetImageAttribute(MagickWand *wand,const char *property) % % A description of each parameter follows: % % o wand: the magick wand. % % o property: the property. 
% */ WandExport char *MagickGetImageAttribute(MagickWand *wand,const char *property) { return(MagickGetImageProperty(wand,property)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a g i c k G e t I m a g e I n d e x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageIndex() returns the index of the current image. % % The format of the MagickGetImageIndex method is: % % ssize_t MagickGetImageIndex(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ WandExport ssize_t MagickGetImageIndex(MagickWand *wand) { return(MagickGetIteratorIndex(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a g i c k G e t I m a g e C h a n n e l E x t r e m a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageChannelExtrema() gets the extrema for one or more image % channels. % % The format of the MagickGetImageChannelExtrema method is: % % MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand, % const ChannelType channel,size_t *minima,size_t *maxima) % % A description of each parameter follows: % % o wand: the magick wand. % % o channel: the image channel(s). % % o minima: The minimum pixel value for the specified channel(s). % % o maxima: The maximum pixel value for the specified channel(s). 
% */ WandExport MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand, const ChannelType channel,size_t *minima,size_t *maxima) { MagickBooleanType status; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); status=GetImageChannelExtrema(wand->images,channel,minima,maxima, wand->exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a g i c k G e t I m a g e E x t r e m a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageExtrema() gets the extrema for the image. % % The format of the MagickGetImageExtrema method is: % % MagickBooleanType MagickGetImageExtrema(MagickWand *wand, % size_t *minima,size_t *maxima) % % A description of each parameter follows: % % o wand: the magick wand. % % o minima: The minimum pixel value for the specified channel(s). % % o maxima: The maximum pixel value for the specified channel(s). 
%
*/
WandExport MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
  size_t *minima,size_t *maxima)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* Deprecated: delegates to the MagickCore GetImageExtrema(). */
  status=GetImageExtrema(wand->images,minima,maxima,wand->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k G e t I m a g e M a t t e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageMatte() returns MagickTrue if the image has a matte channel
%  otherwise MagickFalse.
%
%  The format of the MagickGetImageMatte method is:
%
%      MagickBooleanType MagickGetImageMatte(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickBooleanType MagickGetImageMatte(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* Report whether the current image carries a matte (alpha) channel. */
  return(wand->images->matte);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k G e t I m a g e P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImagePixels() extracts pixel data from an image and returns it to
%  you.  The method returns MagickTrue on success otherwise MagickFalse if an
%  error is encountered.  The data is returned as char, short int, int, ssize_t,
%  float, or double in the order specified by map.
% % Suppose you want to extract the first scanline of a 640x480 image as % character data in red-green-blue order: % % MagickGetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels); % % The format of the MagickGetImagePixels method is: % % MagickBooleanType MagickGetImagePixels(MagickWand *wand, % const ssize_t x,const ssize_t y,const size_t columns, % const size_t rows,const char *map,const StorageType storage, % void *pixels) % % A description of each parameter follows: % % o wand: the magick wand. % % o x, y, columns, rows: These values define the perimeter % of a region of pixels you want to extract. % % o map: This string reflects the expected ordering of the pixel array. % It can be any combination or order of R = red, G = green, B = blue, % A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan, % Y = yellow, M = magenta, K = black, I = intensity (for grayscale), % P = pad. % % o storage: Define the data type of the pixels. Float and double types are % expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from % these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel, % LongPixel, QuantumPixel, or ShortPixel. % % o pixels: This array of values contain the pixel components as defined by % map and type. You must preallocate this array where the expected % length varies depending on the values of width, height, map, and type. % */ WandExport MagickBooleanType MagickGetImagePixels(MagickWand *wand, const ssize_t x,const ssize_t y,const size_t columns, const size_t rows,const char *map,const StorageType storage, void *pixels) { return(MagickExportImagePixels(wand,x,y,columns,rows,map,storage,pixels)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k G e t I m a g e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageSize() returns the image length in bytes. 
%
%  The format of the MagickGetImageSize method is:
%
%      MagickSizeType MagickGetImageSize(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickSizeType MagickGetImageSize(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* The image length in bytes, taken from the current image's blob. */
  return(GetBlobSize(wand->images));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k M a p I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMapImage() replaces the colors of an image with the closest color
%  from a reference image.
%
%  The format of the MagickMapImage method is:
%
%      MagickBooleanType MagickMapImage(MagickWand *wand,
%        const MagickWand *map_wand,const MagickBooleanType dither)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o map: the map wand.
%
%    o dither: Set this integer value to something other than zero to dither
%      the mapped image.
% */ WandExport MagickBooleanType MagickMapImage(MagickWand *wand, const MagickWand *map_wand,const MagickBooleanType dither) { MagickBooleanType status; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if ((wand->images == (Image *) NULL) || (map_wand->images == (Image *) NULL)) ThrowWandException(WandError,"ContainsNoImages",wand->name); status=MapImage(wand->images,map_wand->images,dither); if (status == MagickFalse) InheritException(wand->exception,&wand->images->exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k M a t t e F l o o d f i l l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickMatteFloodfillImage() changes the transparency value of any pixel that % matches target and is an immediate neighbor. If the method % FillToBorderMethod is specified, the transparency value is changed for any % neighbor pixel that does not match the bordercolor member of image. % % The format of the MagickMatteFloodfillImage method is: % % MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand, % const double alpha,const double fuzz,const PixelWand *bordercolor, % const ssize_t x,const ssize_t y) % % A description of each parameter follows: % % o wand: the magick wand. % % o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully % transparent. % % o fuzz: By default target must match a particular pixel color % exactly. However, in many cases two colors may differ by a small amount. % The fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now interpreted % as the same color for the purposes of the floodfill. 
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
  const double alpha,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /*
    Seed the target color from the pixel at (x,y), wrapped into the canvas.
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  /*
    Convert the caller's alpha (1.0 opaque .. 0.0 transparent) to the core
    opacity convention.  With a border color, flood until the border;
    otherwise match the target color.
  */
  status=MatteFloodfillImage(wand->images,target,ClampToQuantum(
    (MagickRealType) QuantumRange-QuantumRange*alpha),x,y,bordercolor !=
    (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k M e d i a n F i l t e r I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMedianFilterImage() applies a digital filter that improves the quality
%  of a noisy image.  Each pixel is replaced by the median in a set of
%  neighboring pixels as defined by radius.
%
%  The format of the MagickMedianFilterImage method is:
%
%      MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
% */ WandExport MagickBooleanType MagickMedianFilterImage(MagickWand *wand, const double radius) { Image *median_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); median_image=MedianFilterImage(wand->images,radius,wand->exception); if (median_image == (Image *) NULL) return(MagickFalse); ReplaceImageInList(&wand->images,median_image); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k M i n i m u m I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickMinimumImages() returns the minimum intensity of an image sequence. % % The format of the MagickMinimumImages method is: % % MagickWand *MagickMinimumImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ WandExport MagickWand *MagickMinimumImages(MagickWand *wand) { Image *minimum_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); minimum_image=EvaluateImages(wand->images,MinEvaluateOperator, wand->exception); if (minimum_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,minimum_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k M o d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickModeImage() makes each pixel the 'predominant color' of the % neighborhood of the specified radius. 
% % The format of the MagickModeImage method is: % % MagickBooleanType MagickModeImage(MagickWand *wand, % const double radius) % % A description of each parameter follows: % % o wand: the magick wand. % % o radius: the radius of the pixel neighborhood. % */ WandExport MagickBooleanType MagickModeImage(MagickWand *wand, const double radius) { Image *mode_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); mode_image=ModeImage(wand->images,radius,wand->exception); if (mode_image == (Image *) NULL) return(MagickFalse); ReplaceImageInList(&wand->images,mode_image); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k M o s a i c I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickMosaicImages() inlays an image sequence to form a single coherent % picture. It returns a wand with each image in the sequence composited at % the location defined by the page offset of the image. % % The format of the MagickMosaicImages method is: % % MagickWand *MagickMosaicImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. 
%
*/
WandExport MagickWand *MagickMosaicImages(MagickWand *wand)
{
  Image
    *mosaic_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  /*
    MosaicImages() composites each image at its page offset; NULL signals
    failure with the reason recorded in wand->exception.
  */
  mosaic_image=MosaicImages(wand->images,wand->exception);
  if (mosaic_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,mosaic_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k O p a q u e I m a g e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickOpaqueImage() changes any pixel that matches color with the color
%  defined by fill.
%
%  The format of the MagickOpaqueImage method is:
%
%      MagickBooleanType MagickOpaqueImage(MagickWand *wand,
%        const PixelWand *target,const PixelWand *fill,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o target: Change this target color to the fill color within the image.
%
%    o fill: the fill pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  MagickBooleanType
    status;

  /*
    Deprecated entry point: the work is done by MagickPaintOpaqueImage(),
    which paints across the default channel set.
  */
  status=MagickPaintOpaqueImage(wand,target,fill,fuzz);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k P a i n t F l o o d f i l l I m a g e                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintFloodfillImage() changes the color value of any pixel that
%  matches target and is an immediate neighbor.  If the method
%  FillToBorderMethod is specified, the color value is changed for any
%  neighbor pixel that does not match the bordercolor member of image.
%
%  The format of the MagickPaintFloodfillImage method is:
%
%      MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
%        const ChannelType channel,const PixelWand *fill,const double fuzz,
%        const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o fill: the floodfill color pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
  const ChannelType channel,const PixelWand *fill,const double fuzz,
  const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
{
  /*
    Deprecated entry point: equivalent to MagickFloodfillPaintImage() with
    the invert argument fixed at MagickFalse.
  */
  return(MagickFloodfillPaintImage(wand,channel,fill,fuzz,bordercolor,x,y,
    MagickFalse));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k P a i n t O p a q u e I m a g e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintOpaqueImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickPaintOpaqueImage method is:
%
%      MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
%        const PixelWand *target,const PixelWand *fill,const double fuzz)
%      MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
%        const ChannelType channel,const PixelWand *target,
%        const PixelWand *fill,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o target: Change this target color to the fill color within the image.
%
%    o fill: the fill pixel wand.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  MagickBooleanType
    status;

  /* Paint across the default channel set. */
  status=MagickPaintOpaqueImageChannel(wand,DefaultChannels,target,fill,fuzz);
  return(status);
}

WandExport MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
  const ChannelType channel,const PixelWand *target,const PixelWand *fill,
  const double fuzz)
{
  /*
    Deprecated entry point: equivalent to MagickOpaquePaintImageChannel()
    with the invert argument fixed at MagickFalse.
  */
  return(MagickOpaquePaintImageChannel(wand,channel,target,fill,fuzz,
    MagickFalse));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k P a i n t T r a n s p a r e n t I m a g e                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintTransparentImage() changes any pixel that matches color with
%  the color defined by fill.
%
%  The format of the MagickPaintTransparentImage method is:
%
%      MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
%        const PixelWand *target,const double alpha,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: Change this target color to specified opacity value within
%      the image.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  MagickBooleanType
    status;

  /*
    Deprecated entry point: equivalent to MagickTransparentPaintImage()
    with the invert argument fixed at MagickFalse.
  */
  status=MagickTransparentPaintImage(wand,target,alpha,fuzz,MagickFalse);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k R e c o l o r I m a g e                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickRecolorImage() apply color transformation to an image.  The method
%  permits saturation changes, hue rotation, luminance to alpha, and various
%  other effects.  Although variable-sized transformation matrices can be
%  used, typically one uses a 5x5 matrix for an RGBA image and a 6x6 for
%  CMYKA (or RGBA with offsets).  The matrix is similar to those used by
%  Adobe Flash except offsets are in column 6 rather than 5 (in support of
%  CMYKA images) and offsets are normalized (divide Flash offset by 255).
%
%  The format of the MagickRecolorImage method is:
%
%      MagickBooleanType MagickRecolorImage(MagickWand *wand,
%        const size_t order,const double *color_matrix)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o order: the number of columns and rows in the color matrix.
%
%    o color_matrix: An array of doubles representing the color matrix.
%
*/
WandExport MagickBooleanType MagickRecolorImage(MagickWand *wand,
  const size_t order,const double *color_matrix)
{
  Image
    *transform_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /*
    A NULL matrix is rejected quietly (no exception is raised), before the
    usual ContainsNoImages check.
  */
  if (color_matrix == (const double *) NULL)
    return(MagickFalse);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /*
    RecolorImage() returns a new image, or NULL on failure with the reason
    recorded in wand->exception.
  */
  transform_image=RecolorImage(wand->images,order,color_matrix,
    wand->exception);
  if (transform_image == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,transform_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k R e d u c e N o i s e I m a g e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickReduceNoiseImage() smooths the contours of an image while still
%  preserving edge information.  The algorithm works by replacing each pixel
%  with its neighbor closest in value.  A neighbor is defined by radius.  Use
%  a radius of 0 and ReduceNoise() selects a suitable radius for you.
%
%  The format of the MagickReduceNoiseImage method is:
%
%      MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
  const double radius)
{
  Image
    *noise_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /*
    ReduceNoiseImage() returns a new image, or NULL on failure with the
    reason recorded in wand->exception.
  */
  noise_image=ReduceNoiseImage(wand->images,radius,wand->exception);
  if (noise_image == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,noise_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k M a x i m u m I m a g e s                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMaximumImages() returns the maximum intensity of an image sequence.
%
%  The format of the MagickMaximumImages method is:
%
%      MagickWand *MagickMaximumImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMaximumImages(MagickWand *wand)
{
  Image
    *maximum_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  /*
    EvaluateImages() folds the whole sequence into one image holding the
    per-pixel maximum.
  */
  maximum_image=EvaluateImages(wand->images,MaxEvaluateOperator,
    wand->exception);
  if (maximum_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,maximum_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k S e t I m a g e A t t r i b u t e                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImageAttribute() associates a property with an image.
% % The format of the MagickSetImageAttribute method is: % % MagickBooleanType MagickSetImageAttribute(MagickWand *wand, % const char *property,const char *value) % % A description of each parameter follows: % % o wand: the magick wand. % % o property: the property. % % o value: the value. % */ WandExport MagickBooleanType MagickSetImageAttribute(MagickWand *wand, const char *property,const char *value) { return(SetImageProperty(wand->images,property,value)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k S e t I m a g e I n d e x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageIndex() set the current image to the position of the list % specified with the index parameter. % % The format of the MagickSetImageIndex method is: % % MagickBooleanType MagickSetImageIndex(MagickWand *wand, % const ssize_t index) % % A description of each parameter follows: % % o wand: the magick wand. % % o index: the scene number. % */ WandExport MagickBooleanType MagickSetImageIndex(MagickWand *wand, const ssize_t index) { return(MagickSetIteratorIndex(wand,index)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a g i c k S e t I m a g e O p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageOption() associates one or options with a particular image % format (.e.g MagickSetImageOption(wand,"jpeg","perserve","yes"). % % The format of the MagickSetImageOption method is: % % MagickBooleanType MagickSetImageOption(MagickWand *wand, % const char *format,const char *key,const char *value) % % A description of each parameter follows: % % o wand: the magick wand. % % o format: the image format. % % o key: The key. % % o value: The value. 
%
*/
WandExport MagickBooleanType MagickSetImageOption(MagickWand *wand,
  const char *format,const char *key,const char *value)
{
  char
    option[MaxTextExtent];

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /*
    Compose the "format:key=value" option string; FormatLocaleString()
    bounds the write at MaxTextExtent.
  */
  (void) FormatLocaleString(option,MaxTextExtent,"%s:%s=%s",format,key,value);
  return(DefineImageOption(wand->image_info,option));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k T r a n s p a r e n t I m a g e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickTransparentImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickTransparentImage method is:
%
%      MagickBooleanType MagickTransparentImage(MagickWand *wand,
%        const PixelWand *target,const double alpha,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: Change this target color to specified opacity value within
%      the image.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  MagickBooleanType
    status;

  /* Deprecated: forward to MagickPaintTransparentImage(). */
  status=MagickPaintTransparentImage(wand,target,alpha,fuzz);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k R e g i o n O f I n t e r e s t I m a g e                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickRegionOfInterestImage() extracts a region of the image and returns
%  it as a new wand.
%
%  The format of the MagickRegionOfInterestImage method is:
%
%      MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
%        const size_t width,const size_t height,const ssize_t x,
%        const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o width: the region width.
%
%    o height: the region height.
%
%    o x: the region x offset.
%
%    o y: the region y offset.
%
*/
WandExport MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
  const size_t width,const size_t height,const ssize_t x,
  const ssize_t y)
{
  MagickWand
    *region_wand;

  /* Deprecated: forward to MagickGetImageRegion(). */
  region_wand=MagickGetImageRegion(wand,width,height,x,y);
  return(region_wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k S e t I m a g e P i x e l s                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImagePixels() accepts pixel data and stores it in the image at
%  the location you specify.  The method returns MagickTrue on success,
%  otherwise MagickFalse if an error is encountered.  The pixel data can be
%  either char, short int, int, ssize_t, float, or double in the order
%  specified by map.
%
%  Suppose you want to upload the first scanline of a 640x480 image from
%  character data in red-green-blue order:
%
%      MagickSetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
%  The format of the MagickSetImagePixels method is:
%
%      MagickBooleanType MagickSetImagePixels(MagickWand *wand,
%        const ssize_t x,const ssize_t y,const size_t columns,
%        const size_t rows,const char *map,const StorageType storage,
%        const void *pixels)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x, y, columns, rows:  These values define the perimeter of a region
%      of pixels you want to define.
%
%    o map:  This string reflects the expected ordering of the pixel array.
%      It can be any combination or order of R = red, G = green, B = blue,
%      A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
%      Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
%      P = pad.
%
%    o storage: Define the data type of the pixels.  Float and double types
%      are expected to be normalized [0..1] otherwise [0..QuantumRange].
%      Choose from these types: CharPixel, ShortPixel, IntegerPixel,
%      LongPixel, FloatPixel, or DoublePixel.
%
%    o pixels: This array of values contain the pixel components as defined
%      by map and type.  You must preallocate this array where the expected
%      length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickSetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  const void *pixels)
{
  /* Deprecated: forward to MagickImportImagePixels(). */
  return(MagickImportImagePixels(wand,x,y,columns,rows,map,storage,pixels));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   M a g i c k W r i t e I m a g e B l o b                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickWriteImageBlob() implements direct to memory image formats.
It
%  returns the image as a blob and its length.  Use MagickSetFormat() to
%  set the format of the returned blob (GIF, JPEG, PNG, etc.).
%
%  Use MagickRelinquishMemory() to free the blob when you are done with it.
%
%  The format of the MagickWriteImageBlob method is:
%
%      unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o length: the length of the blob.
%
*/
WandExport unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
{
  /*
    Deprecated: identical to MagickGetImageBlob(); the caller owns the
    returned blob and frees it with MagickRelinquishMemory().
  */
  return(MagickGetImageBlob(wand,length));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w P i x e l V i e w                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelView() returns a pixel view required for all other methods in the
%  Pixel View API.
%
%  The format of the NewPixelView method is:
%
%      PixelView *NewPixelView(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the wand.
%
*/
/*
  Allocate one row of pixel wands per OpenMP thread so parallel scanline
  workers never share wands.  Returns NULL on allocation failure.
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    i;

  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  (void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      /* Partial failure: release any rows already allocated. */
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}

WandExport PixelView *NewPixelView(MagickWand *wand)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  /*
    NOTE(review): this asserts MagickSignature while the other functions in
    this file check WandSignature -- confirm the two are equivalent here.
  */
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /* The wand must be set before the cache view is acquired from it. */
  pixel_view->wand=wand;
  pixel_view->view=AcquireCacheView(pixel_view->wand->images);
  /* The view covers the whole canvas; one wand row slot per column. */
  pixel_view->region.width=wand->images->columns;
  pixel_view->region.height=wand->images->rows;
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w P i x e l V i e w R e g i o n                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelViewRegion() returns a pixel view required for all other methods
%  in the Pixel View API.
%
%  The format of the NewPixelViewRegion method is:
%
%      PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixel_wands view.
%
*/
WandExport PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /*
    Bug fix: the wand member must be assigned BEFORE the cache view is
    acquired.  The struct was just zeroed, so the previous order
    (AcquireCacheView(pixel_view->wand->images) first) dereferenced a NULL
    pixel_view->wand.  NewPixelView() already uses this order.
  */
  pixel_view->wand=wand;
  pixel_view->view=AcquireCacheView(pixel_view->wand->images);
  /* The view covers only the caller-specified region of the canvas. */
  pixel_view->region.width=width;
  pixel_view->region.height=height;
  pixel_view->region.x=x;
  pixel_view->region.y=y;
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   P i x e l G e t N e x t R o w                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PixelGetNextRow() returns the next row as an array of pixel wands from
%  the pixel iterator.
%
%  The format of the PixelGetNextRow method is:
%
%      PixelWand **PixelGetNextRow(PixelIterator *iterator)
%
%  A description of each parameter follows:
%
%    o iterator: the pixel iterator.
%
*/
WandExport PixelWand **PixelGetNextRow(PixelIterator *iterator)
{
  size_t
    number_wands;

  /*
    Deprecated: forward to PixelGetNextIteratorRow(); the row width it
    reports is discarded by this legacy interface.
  */
  return(PixelGetNextIteratorRow(iterator,&number_wands));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   P i x e l I t e r a t o r G e t E x c e p t i o n                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PixelIteratorGetException() returns the severity, reason, and description
%  of any error that occurs when using other methods in this API.
%
%  The format of the PixelIteratorGetException method is:
%
%      char *PixelIteratorGetException(const PixelIterator *iterator,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o iterator: the pixel iterator.
%
%    o severity: the severity of the error is returned here.
%
*/
WandExport char *PixelIteratorGetException(const PixelIterator *iterator,
  ExceptionType *severity)
{
  /* Deprecated: forward to PixelGetIteratorException(). */
  return(PixelGetIteratorException(iterator,severity));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t P i x e l V i e w I t e r a t o r                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelViewIterator() iterates over the pixel view in parallel and calls
%  your set method for each scanline of the view.  The pixel region is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.
The pixels are initially
%  undefined and any settings you make in the callback method are
%  automagically synced back to your image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetPixelViewIterator method is:
%
%      MagickBooleanType SetPixelViewIterator(PixelView *destination,
%        SetPixelViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the pixel view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType SetPixelViewIterator(PixelView *destination,
  SetPixelViewMethod set,void *context)
{
#define SetPixelViewTag  "PixelView/Set"

  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(destination != (PixelView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetPixelViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* Writable pixels require DirectClass storage. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  /*
    NOTE(review): the loop runs up to region.height, not region.y+height;
    for a view with region.y > 0 this visits fewer rows than the region
    height -- confirm views always start at row 0 here.
  */
  for (y=destination->region.y; y < (ssize_t) destination->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    /* A failure on any scanline short-circuits the remaining iterations. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->region.x,
      y,destination->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* Let the callback fill this thread's row of pixel wands. */
    if (set(destination,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the wand values back into the authentic pixel buffer. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_SetPixelViewIterator)
#endif
        proceed=SetImageProgress(destination_image,SetPixelViewTag,progress++,
          destination->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   T r a n s f e r P i x e l V i e w I t e r a t o r                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferPixelViewIterator() iterates over two pixel views in parallel and
%  calls your transfer method for each scanline of the view.  The source
%  pixel region is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination pixel view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image
%  dimension are permitted.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the TransferPixelViewIterator method is:
%
%      MagickBooleanType TransferPixelViewIterator(PixelView *source,
%        PixelView *destination,TransferPixelViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o destination: the destination pixel view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType TransferPixelViewIterator(PixelView *source,
  PixelView *destination,TransferPixelViewMethod transfer,void *context)
{
#define TransferPixelViewTag  "PixelView/Transfer"

  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  /*
    NOTE(review): only the source view is asserted; destination is
    dereferenced below without a NULL/signature check -- confirm callers
    always pass a valid destination.
  */
  if (transfer == (TransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /* The destination is written, so it must be DirectClass. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    /* Read one source scanline (virtual pixels allow out-of-canvas). */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /* Seed this thread's source wands from the source scanline. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /*
      NOTE(review): the destination wands are seeded from the SOURCE
      scanline (pixels/indexes), not from destination_pixels, and the loop
      bound is the destination width while indexing the source buffer --
      this assumes the two regions have compatible widths; confirm.
    */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* Let the callback transform source wands into destination wands. */
    if (transfer(source,destination,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the destination wands back into the authentic pixel buffer. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /*
          NOTE(review): the destination sync failed but the exception is
          fetched from source->view -- possibly should be destination->view;
          confirm against upstream.
        */
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_TransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,TransferPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   U p d a t e P i x e l V i e w I t e r a t o r                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UpdatePixelViewIterator() iterates over the pixel view in parallel and
%  calls your update method for each scanline of the view.  The pixel region
%  is confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension are permitted.  Updates to pixels
%  in your callback are automagically synced back to the image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback update method that must be
%  executed by a single thread at a time.
%
%  The format of the UpdatePixelViewIterator method is:
%
%      MagickBooleanType UpdatePixelViewIterator(PixelView *source,
%        UpdatePixelViewMethod update,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source pixel view.
%
%    o update: the update callback method.
%
%    o context: the user defined context.
%
*/
/*
  Walks every scanline of the view in parallel: loads the row into this
  thread's pixel wands, invokes the caller's update callback, and syncs the
  (possibly modified) wand values back into the image's pixel cache.
*/
WandExport MagickBooleanType UpdatePixelViewIterator(PixelView *source,
  UpdatePixelViewMethod update,void *context)
{
#define UpdatePixelViewTag  "PixelView/Update"

  ExceptionInfo
    *exception;

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (update == (UpdatePixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* Pixels are written back, so the image must be DirectClass first. */
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    /* An earlier scanline failed; skip remaining work but keep iterating
       (OpenMP loops cannot break). */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->region.x,y,
      source->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(source->view);
    /* Prime this thread's wands from the authentic scanline. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* User callback may modify the wands in place. */
    if (update(source,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the wand values back into the pixel cache. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          source->pixel_wands[id][x]));
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_UpdatePixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,UpdatePixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
#endif
zgeadd.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> s d c * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_types.h" #include "plasma_workspace.h" /***************************************************************************//** * * @ingroup plasma_geadd * * Performs an addition of two general rectangular matrices similarly to the * 'pztradd()' function from the PBLAS library: * * \f[ B = \alpha * op( A ) + \beta * B, \f] * * where op( X ) is one of: * \f[ op( X ) = X, \f] * \f[ op( X ) = X^T, \f] * \f[ op( X ) = X^H, \f] * * alpha and beta are scalars and A, B are matrices with op( A ) an m-by-n or * n-by-m matrix depending on the value of transa and B an m-by-n matrix. * ******************************************************************************* * * @param[in] transa * Specifies whether the matrix A is non-transposed, transposed, or * conjugate transposed * - PlasmaNoTrans: op( A ) = A * - PlasmaTrans: op( A ) = A^T * - PlasmaConjTrans: op( A ) = A^H * * @param[in] m * Number of rows of the matrices op( A ) and B. * m >= 0. * * @param[in] n * Number of columns of the matrices op( A ) and B. * n >= 0. * * @param[in] alpha * Scalar factor of A. * * @param[in] pA * Matrix of size lda-by-k, where k is n when transa == PlasmaNoTrans * and m otherwise. * * @param[in] lda * Leading dimension of the array A. lda >= max(1,l), where l is m * when transa = PlasmaNoTrans and n otherwise. * * @param[in] beta * Scalar factor of B. * * @param[in,out] pB * Matrix of size ldb-by-n. * On exit, B = alpha * op( A ) + beta * B * * @param[in] ldb * Leading dimension of the array B. * ldb >= max(1,m). 
* ******************************************************************************* * * @retval PlasmaSuccess successful exit * ******************************************************************************* * * @sa plasma_omp_zgeadd * @sa plasma_cgeadd * @sa plasma_dgeadd * @sa plasma_sgeadd * ******************************************************************************/ int plasma_zgeadd(plasma_enum_t transa, int m, int n, plasma_complex64_t alpha, plasma_complex64_t *pA, int lda, plasma_complex64_t beta, plasma_complex64_t *pB, int ldb) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if ((transa != PlasmaNoTrans) && (transa != PlasmaTrans) && (transa != PlasmaConjTrans)) { plasma_error("illegal value of transa"); return -1; } if (m < 0) { plasma_error("illegal value of m"); return -2; } if (n < 0) { plasma_error("illegal value of n"); return -3; } if (pA == NULL) { plasma_error("NULL A"); return -5; } int am, an; if (transa == PlasmaNoTrans) { am = m; an = n; } else { am = n; an = m; } int bm = m; int bn = n; if (lda < imax(1, am)) { plasma_error("illegal value of lda"); return -6; } if (pB == NULL) { plasma_error("NULL B"); return -8; } if (ldb < imax(1, bm)) { plasma_error("illegal value of ldb"); return -9; } // quick return if (m == 0 || n == 0 || (alpha == 0.0 && beta == 1.0)) return PlasmaSuccess; // Set tiling parameters. int nb = plasma->nb; // Create tile matrices. 
plasma_desc_t A; plasma_desc_t B; int retval; retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, am, an, 0, 0, am, an, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, bm, bn, 0, 0, bm, bn, &B); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); return retval; } // Create sequence. plasma_sequence_t *sequence = NULL; retval = plasma_sequence_create(&sequence); if (retval != PlasmaSuccess) { plasma_error("plasma_sequence_create() failed"); return retval; } // Initialize request. plasma_request_t request = PlasmaRequestInitializer; // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_zge2desc(pA, lda, A, sequence, &request); plasma_omp_zge2desc(pB, ldb, B, sequence, &request); // Call tile async function. plasma_omp_zgeadd(transa, alpha, A, beta, B, sequence, &request); // Translate back to LAPACK layout. plasma_omp_zdesc2ge(A, pA, lda, sequence, &request); plasma_omp_zdesc2ge(B, pB, ldb, sequence, &request); } // implicit synchronization // Free matrices in tile layout. plasma_desc_destroy(&A); plasma_desc_destroy(&B); // Return status. int status = sequence->status; plasma_sequence_destroy(sequence); return status; } /***************************************************************************//** * * @ingroup plasma_geadd * * Performs an addition of two general rectangular matrices similarly to the * 'pztradd()' function from the PBLAS library. Non-blocking tile version of * plasma_zgeadd(). May return before the computation is finished. Operates on * matrices stored by tiles. All matrices are passed through descriptors. All * dimensions are taken from the descriptors. Allows for pipelining of * operations at runtime. 
 *
 *******************************************************************************
 *
 * @param[in] transa
 *          Specifies whether the matrix A is non-transposed, transposed, or
 *          conjugate transposed
 *          - PlasmaNoTrans:   op( A ) = A
 *          - PlasmaTrans:     op( A ) = A^T
 *          - PlasmaConjTrans: op( A ) = A^H
 *
 * @param[in] alpha
 *          The scalar alpha.
 *
 * @param[in] A
 *          Descriptor of matrix A.
 *
 * @param[in] beta
 *          The scalar beta.
 *
 * @param[in,out] B
 *          Descriptor of matrix B.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes). Check the
 *          sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_zgeadd
 * @sa plasma_omp_cgeadd
 * @sa plasma_omp_dgeadd
 * @sa plasma_omp_sgeadd
 *
 ******************************************************************************/
void plasma_omp_zgeadd(plasma_enum_t transa,
                       plasma_complex64_t alpha, plasma_desc_t A,
                       plasma_complex64_t beta,  plasma_desc_t B,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans)   &&
        (transa != PlasmaConjTrans)) {
        plasma_error("illegal value of transa");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // NOTE(review): the two checks below invoke plasma_request_fail() with the
    // very pointer that was found to be NULL; presumably plasma_request_fail
    // tolerates NULL arguments — verify against its implementation.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return: nothing to do when the update is B = 0*A + 1*B (or op(A)
    // has no rows) — am is the row count of op( A ).
    int am = transa == PlasmaNoTrans ? A.m : A.n;
    if ((alpha == 0.0 || am == 0) && beta == 1.0)
        return;

    // Call the parallel function.
    plasma_pzgeadd(transa, alpha, A, beta, B, sequence, request);
}
DRB025-simdtruedep-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* This one has race condition due to true dependence. But data races happen at instruction level, not thread level. Data race pair: a[i+1]@68:5 vs. a[i]@68:12 */ #include <stdlib.h> int main(int argc, char* argv[]) { int i; int len=100; if (argc>1) len = atoi(argv[1]); int a[len], b[len]; #pragma omp parallel for private(i ) for (i=0;i<len;i++) { a[i]=i; b[i]=i+1; } for (i=0;i<len-1;i++) a[i+1]=a[i]*b[i]; for (i=0;i<len;i++) { printf("%d %d\n", a[i], b[i]); } return 0; }
DRB039-truedepsingleelement-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* Data race pair: a[i]@62:5 vs. a[0]@62:15 */
#include <stdlib.h>
#include <stdio.h>

/*
 * Intentional data race (DataRaceBench "yes" case): every iteration of the
 * parallel loop reads a[0] while the i==0 iteration writes it, so the read
 * of a[0] races with the write of a[0].
 */
int main (int argc, char* argv[])
{
  int len=1000;
  int i;
  int a[1000];

  a[0] = 2;

#pragma omp parallel for
  for (i=0;i<len;i++)
    a[i]=a[i]+a[0];  /* race: unsynchronized read of a[0] vs. write by i==0 */

  printf("a[500]=%d\n", a[500]);
  return 0;
}
GB_binop__isne_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isne_int8) // A.*B function (eWiseMult): GB (_AemultB_08__isne_int8) // A.*B function (eWiseMult): GB (_AemultB_02__isne_int8) // A.*B function (eWiseMult): GB (_AemultB_04__isne_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int8) // A*D function (colscale): GB (_AxD__isne_int8) // D*A function (rowscale): GB (_DxB__isne_int8) // C+=B function (dense accum): GB (_Cdense_accumB__isne_int8) // C+=b function (dense accum): GB (_Cdense_accumb__isne_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int8) // C=scalar+B GB (_bind1st__isne_int8) // C=scalar+B' GB (_bind1st_tran__isne_int8) // C=A+scalar GB (_bind2nd__isne_int8) // C=A'+scalar GB (_bind2nd_tran__isne_int8) // C type: int8_t // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 
0 // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x != y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_INT8 || GxB_NO_ISNE_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isne_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isne_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isne_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isne_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = 
(int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isne_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isne_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) alpha_scalar_in)) ; beta_scalar = (*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isne_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const 
GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isne_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isne_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isne_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isne_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p 
= 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isne_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB (_bind1st_tran__isne_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB (_bind2nd_tran__isne_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
graph.h
// Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details

#ifndef GRAPH_H_
#define GRAPH_H_

#include <algorithm>
#include <cinttypes>
#include <cstddef>
#include <iostream>
#include <type_traits>

#include "pvector.h"
#include "util.h"


/*
GAP Benchmark Suite
Class:  CSRGraph
Author: Scott Beamer

Simple container for graph in CSR format
 - Intended to be constructed by a Builder
 - To make weighted, set DestID_ template type to NodeWeight
 - MakeInverse parameter controls whether graph stores its inverse
*/

/*
DMM-GAPBS
Author: Zach Hansen
Adaptation Notes:
 - Neighborhood methods are overwritten to enable PEs to iterate over
   neighborhoods on foreign PEs using range based iteration (performs shmem
   gets behind the scenes)
 - shmem ptrs should be used when two PEs share a memory space, but there is
   no check for that yet
 - Enables printing of the distributed topology
*/


// Used to hold node & weight, with another node it makes a weighted edge
template <typename NodeID_, typename WeightT_>
struct NodeWeight {
  NodeID_ v;    // destination vertex
  WeightT_ w;   // edge weight (defaults to 1 for unweighted construction)
  NodeWeight() {}
  NodeWeight(NodeID_ v) : v(v), w(1) {}
  NodeWeight(NodeID_ v, WeightT_ w) : v(v), w(w) {}

  // orders by vertex first, then weight, so duplicates sort adjacently
  bool operator< (const NodeWeight& rhs) const {
    return v == rhs.v ? w < rhs.w : v < rhs.v;
  }

  // doesn't check WeightT_s, needed to remove duplicate edges
  bool operator== (const NodeWeight& rhs) const { return v == rhs.v; }

  // doesn't check WeightT_s, needed to remove self edges
  bool operator== (const NodeID_& rhs) const { return v == rhs; }

  // implicit conversion lets a NodeWeight be used where a NodeID_ is expected
  operator NodeID_() { return v; }
};

template <typename NodeID_, typename WeightT_>
std::ostream& operator<<(std::ostream& os,
                         const NodeWeight<NodeID_, WeightT_>& nw) {
  os << nw.v << " " << nw.w;
  return os;
}

template <typename NodeID_, typename WeightT_>
std::istream& operator>>(std::istream& is, NodeWeight<NodeID_, WeightT_>& nw) {
  is >> nw.v >> nw.w;
  return is;
}



// Syntatic sugar for an edge
template <typename SrcT, typename DstT = SrcT>
struct EdgePair {
  SrcT u;
  DstT v;

  EdgePair() {}

  EdgePair(SrcT u, DstT v) : u(u), v(v) {}
};

// SG = serialized graph, these types are for writing graph to file
typedef int32_t SGID;
typedef EdgePair<SGID> SGEdge;
typedef int64_t SGOffset;


// If 2 PEs share a memory space, it is more efficient to use the shmem_ptrs
// But PEs on separate machines need to use the get/put mems

template <class NodeID_, class DestID_ = NodeID_, bool MakeInverse = true>
class CSRGraph {
  // Used for *non-negative* offsets within a neighborhood
  typedef std::make_unsigned<std::ptrdiff_t>::type OffsetT;

  // Used to access neighbors of vertex, basically sugar for iterators.
  // Acts as both the range (begin/end) and the iterator itself; remote
  // neighborhoods are read one element at a time with shmem_getmem.
  class Neighborhood {
    int owner;                  // PE that owns vertex n_
    bool same_space_;           // true if owner shares this PE's address space
    NodeID_ n_;                 // global vertex id
    NodeID_ local;              // n_'s position within the owner's partition
    DestID_** g_index_;         // symmetric CSR index array
    DestID_** ptr_start;        // shmem_ptr view of owner's index slot (same-space path)
    DestID_** ptr_end;
    OffsetT start_offset_;      // clamped starting offset into the neighborhood
    DestID_* foreign_start;     // owner's neighbor-array bounds fetched via getmem
    DestID_* foreign_end;
    DestID_* beginning;         // first neighbor (after start_offset_)
    DestID_* ending;            // one past last neighbor
   public:
    DestID_* current;           // Current position of iterator within neighborhood
    DestID_ curr_val;           // Current value pointed at by iterator
    // NOTE(review): init-list order (n_, g_index_, start_offset_, same_space_)
    // does not match declaration order; members initialize in declaration
    // order, which works here but draws -Wreorder warnings.
    Neighborhood(NodeID_ n, DestID_** g_index, Partition<NodeID_> vp,
                 OffsetT start_offset, bool same_space = false) :
        n_(n), g_index_(g_index), start_offset_(0), same_space_(same_space) {
      OffsetT max_offset;
      local = vp.local_pos(n_);
      owner = vp.recv(n_);
      //printf("PE %d | g_index: %p, globeql %d local %d\n", shmem_my_pe(), (void*) g_index_, n, local);
      if (vp.pe == owner) {
        // local vertex: index entries can be dereferenced directly
        max_offset = g_index_[local+1] - g_index_[local];
        start_offset_ = std::min(start_offset, max_offset);
        beginning = g_index_[local] + start_offset_;
        ending = g_index_[local+1];
      } else {
        if (same_space_) {              // true if calling PE shares a memory space with owner pe
          ptr_start = (DestID_**) shmem_ptr(g_index_+local, owner);
          ptr_end = (DestID_ **) shmem_ptr((g_index_+(local+1)), owner);
          // NOTE(review): ptr_end - ptr_start is the distance between the two
          // *index slots* (always 1), not the neighborhood length; the other
          // two branches use the dereferenced values (*end - *start), as does
          // out_degree() below. Looks like a bug — confirm before relying on
          // start_offset clamping in the shared-memory path.
          max_offset = ptr_end - ptr_start;
          start_offset_ = std::min(start_offset, max_offset);
          beginning = *ptr_start + start_offset_;
          ending = *ptr_end;
        } else {
          // fetch the owner's two index pointers; they bound the neighborhood
          shmem_getmem(&foreign_start, g_index_+local, sizeof(DestID_*), owner);
          shmem_getmem(&foreign_end, g_index_+(local+1), sizeof(DestID_*), owner);
          max_offset = foreign_end - foreign_start;
          start_offset_ = std::min(start_offset, max_offset);
          beginning = foreign_start + start_offset_;
          ending = foreign_end;
          //printf("PE %d | Node: %d | (beginning = %p) => %d | (ending = %p) => %d\n", vp.pe, n, (void*) (beginning), *beginning, (void*) ending, *ending);
        }
      }
      current = beginning;
    }

    // begin and end are used for range based iteration, must return refs to
    // neighborhoods instead of DestIDs otherwise the overloaded operators
    // won't work
    Neighborhood& begin() { return *this; }
    Neighborhood& end() { return *this; }

    typedef DestID_* iterator;

    // start and finish are what begin and end used to be: local memory can be
    // directly dereferenced with these, but for accessing PEs on separate
    // computers they can only be used to get the address for use in a
    // different shmem call
    iterator start(bool help = false) {
      if (shmem_my_pe() == owner) {
        //printf("PE %d is starting for node %d\n", shmem_my_pe(), n_);
        return(beginning);
      } else {
        if (help)
          printf("PE %d is calling n_start on a foreign PE for node %d\n", shmem_my_pe(), n_);
        if (same_space_) {      // check to see if the PEs share a memory space
          DestID_* neigh_start = (DestID_ *) shmem_ptr(beginning, owner);
          return(neigh_start);
        } else {
          //printf("PE %d is requesting start address %p\n", shmem_my_pe(), (void*) (beginning));
          return(beginning);    // remote address: only valid as a shmem argument
        }
      }
    }

    iterator finish(bool help = false) {
      if (shmem_my_pe() == owner)
        return(ending);
      else {
        if (same_space_)
          return ((DestID_*) shmem_ptr(ending, owner));
        else
          return(ending);       // remote address: only valid as a shmem argument
      }
    }

    // range-for termination test: compares this->current against the end
    // sentinel's `ending` (begin() and end() alias the same object)
    bool operator!=(Neighborhood const& it) const {
      return it.ending != current;
    }

    // Dereference: local reads go straight through; remote reads pull one
    // element into curr_val and return a reference to it (invalidated by the
    // next dereference/subscript).
    DestID_& operator*() {
      if (shmem_my_pe() == owner) {
        return(*current);
      } else {
        shmem_getmem(&curr_val, current, sizeof(DestID_), owner);
        return(curr_val);
      }
    }

    // NOTE(review): this const overload writes to the non-mutable member
    // curr_val, which will not compile if ever instantiated (templates hide
    // this until use). curr_val should be `mutable` — confirm and fix at the
    // source repo.
    const DestID_& operator*() const {
      if (shmem_my_pe() == owner) {
        return(*current);
      } else {
        shmem_getmem(&curr_val, current, sizeof(DestID_), owner);
        return(curr_val);
      }
    }

    Neighborhood& operator+(size_t n) {
      current = current + n;    // Is this correct pointer arithmetic?
      return *this;
    }

    // prefix increment operator
    Neighborhood& operator++() {
      ++current;
      return *this;
    }

    // postfix increment operator
    // NOTE(review): returns the already-incremented object rather than a copy
    // of the pre-increment state, so it behaves like prefix increment.
    Neighborhood operator++(int) {
      ++current;
      return *this;
    }

    DestID_& operator[](size_t n) {
      if (shmem_my_pe() == owner) {
        return(*(beginning+n));
      } else {
        shmem_getmem(&curr_val, beginning+n, sizeof(DestID_), owner);
        //printf("PE %d is pulling val (%d) from %p on PE %d\n", shmem_my_pe(), val, (void*) (begin()+n), owner);
        return curr_val;
      }
    }

    // NOTE(review): same const-correctness problem as const operator*() above.
    const DestID_& operator[](size_t n) const {
      if (shmem_my_pe() == owner) {
        return(*(beginning+n));
      } else {
        shmem_getmem(&curr_val, beginning+n, sizeof(DestID_), owner);
        return curr_val;
      }
    }
  };

  // Frees the symmetric-heap CSR arrays; for undirected graphs in_index_
  // aliases out_index_ and is not freed twice (directed_ guard).
  void ReleaseResources() {
    if (out_index_ != nullptr)
      shmem_free(out_index_);
    if (out_neighbors_ != nullptr)
      shmem_free(out_neighbors_);
    if (directed_) {
      if (in_index_ != nullptr)
        shmem_free(in_index_);
      if (in_neighbors_ != nullptr)
        shmem_free(in_neighbors_);
    }
  }


 public:
  CSRGraph() : directed_(false), num_nodes_(-1), num_edges_(-1),
    out_index_(nullptr), out_neighbors_(nullptr),
    in_index_(nullptr), in_neighbors_(nullptr) {}

  // Undirected graph: in- and out- arrays alias; the global edge count is a
  // sum-reduction of each PE's local neighbor-array length, halved because
  // each undirected edge appears twice.
  // NOTE(review): edge_counts is shmem_malloc'd and never freed — leak.
  CSRGraph(int64_t num_nodes, DestID_** index, DestID_* neighs,
           long* pSync, long* pWrk) :
    directed_(false), num_nodes_(num_nodes),
    out_index_(index), out_neighbors_(neighs),
    in_index_(index), in_neighbors_(neighs) {
      int64_t* edge_counts = (int64_t*) shmem_malloc(sizeof(int64_t));
      Partition<NodeID_> p(num_nodes_);
      *edge_counts = out_index_[p.end - p.start] - out_index_[0];       // how long is the local neighbor array?
      shmem_long_sum_to_all(edge_counts, edge_counts, 1, 0, 0, p.npes, pWrk, pSync);    // Reduction : +
      num_edges_ = *edge_counts / 2;
    }

  // Directed graph: separate in/out arrays; every edge counted once.
  // NOTE(review): same edge_counts leak as above; `symmetric` is unused.
  CSRGraph(int64_t num_nodes, DestID_** out_index, DestID_* out_neighs,
           DestID_** in_index, DestID_* in_neighs, long* pSync, long* pWrk,
           bool symmetric = true) :
    directed_(true), num_nodes_(num_nodes),
    out_index_(out_index), out_neighbors_(out_neighs),
    in_index_(in_index), in_neighbors_(in_neighs)/*, p_{num_nodes}*/ {
      int64_t* edge_counts = (int64_t*) shmem_malloc(sizeof(int64_t));
      Partition<NodeID_> p(num_nodes_);
      *edge_counts = out_index_[p.end - p.start] - out_index_[0];       // how long is the local neighbor array?
      shmem_long_sum_to_all(edge_counts, edge_counts, 1, 0, 0, p.npes, pWrk, pSync);    // Reduction : +
      num_edges_ = *edge_counts;
    }

  // Move constructor: steals the arrays and leaves `other` empty so its
  // destructor won't free them.
  CSRGraph(CSRGraph&& other) : directed_(other.directed_),
    num_nodes_(other.num_nodes_), num_edges_(other.num_edges_),
    out_index_(other.out_index_), out_neighbors_(other.out_neighbors_),
    in_index_(other.in_index_), in_neighbors_(other.in_neighbors_) {
      other.num_edges_ = -1;
      other.num_nodes_ = -1;
      other.out_index_ = nullptr;
      other.out_neighbors_ = nullptr;
      other.in_index_ = nullptr;
      other.in_neighbors_ = nullptr;
  }

  ~CSRGraph() {
    ReleaseResources();
  }

  CSRGraph& operator=(CSRGraph&& other) {
    if (this != &other) {
      ReleaseResources();
      directed_ = other.directed_;
      num_edges_ = other.num_edges_;
      num_nodes_ = other.num_nodes_;
      out_index_ = other.out_index_;
      out_neighbors_ = other.out_neighbors_;
      in_index_ = other.in_index_;
      in_neighbors_ = other.in_neighbors_;
      other.num_edges_ = -1;
      other.num_nodes_ = -1;
      other.out_index_ = nullptr;
      other.out_neighbors_ = nullptr;
      other.in_index_ = nullptr;
      other.in_neighbors_ = nullptr;
    }
    return *this;
  }

  bool directed() const {
    return directed_;
  }

  int64_t num_nodes() const {
    return num_nodes_;
  }

  int64_t num_edges() const {
    return num_edges_;
  }

  int64_t num_edges_directed() const {
    return directed_ ? num_edges_ : 2*num_edges_;
  }

  // Degree of v = difference of adjacent index entries on v's owner;
  // remote owners are read via shmem_ptr (shared space) or shmem_getmem.
  int64_t out_degree(NodeID_ v, bool same_space = false) const {
    // Do the owner of v and the calling PE share a memory space?
    int64_t degree;
    Partition<NodeID_> vp(num_nodes_);
    NodeID_ local = vp.local_pos(v);
    if (v >= vp.start && v < vp.end) {
      degree = out_index_[local+1] - out_index_[local];
    } else {
      if (same_space) {
        NodeID_** p_one = (NodeID_**) shmem_ptr(out_index_+local, vp.recv(v));
        NodeID_** p_two = (NodeID_ **) shmem_ptr((out_index_+(local+1)), vp.recv(v));
        degree = *p_two - *p_one;
      } else {
        DestID_* one;
        DestID_* two;
        shmem_getmem(&one, out_index_+local, sizeof(DestID_*), vp.recv(v));
        shmem_getmem(&two, out_index_+(local+1), sizeof(DestID_*), vp.recv(v));
        degree = two - one;
      }
    }
    return degree;
  }

  int64_t in_degree(NodeID_ v, bool same_space = false) const {
    static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
    int64_t degree;
    Partition<NodeID_> vp(num_nodes_);
    NodeID_ local = vp.local_pos(v);
    if (v >= vp.start && v < vp.end) {
      degree = in_index_[local+1] - in_index_[local];
    } else {
      if (same_space) {
        NodeID_** p_one = (NodeID_**) shmem_ptr(in_index_+local, vp.recv(v));
        NodeID_** p_two = (NodeID_ **) shmem_ptr((in_index_+(local+1)), vp.recv(v));
        degree = *p_two - *p_one;
      } else {
        DestID_* one;
        DestID_* two;
        shmem_getmem(&one, in_index_+local, sizeof(DestID_*), vp.recv(v));
        shmem_getmem(&two, in_index_+(local+1), sizeof(DestID_*), vp.recv(v));
        degree = two - one;
      }
    }
    return degree;
  }

  Neighborhood out_neigh(NodeID_ n, OffsetT start_offset = 0) const {
    Partition<NodeID_> vp(num_nodes_);
    return Neighborhood(n, out_index_, vp, start_offset);
  }

  Neighborhood in_neigh(NodeID_ n, OffsetT start_offset = 0) const {
    Partition<NodeID_> vp(num_nodes_);
    static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
    return Neighborhood(n, in_index_, vp, start_offset);
  }

  void PrintStats() const {
    std::cout << "Graph has " << num_nodes_ << " nodes and "
              << num_edges_ << " ";
    if (!directed_)
      std::cout << "un";
    std::cout << "directed edges for degree: ";
    std::cout << num_edges_/num_nodes_ << std::endl;
  }

  // Prints each PE's local slice of the topology, serialized with a lock.
  void PrintTopology(long* PRINT_LOCK) const {
    shmem_barrier_all();
    Partition<NodeID_> vp(num_nodes_);
    shmem_set_lock(PRINT_LOCK);
    std::cout << "######################## Graph Topology (Outgoing): PE " << vp.pe << " #######################" << std::endl;
    for (NodeID_ i = vp.start; i < vp.end; i++) {
      std::cout << i << ": ";
      //printf("Node %d begin = %d, end = %d\n", i, *(out_neigh(i).begin()), *(out_neigh(i).end()));
      for (DestID_ j : out_neigh(i))
        std::cout << j << " ";
      std::cout << std::endl;
    }
    shmem_clear_lock(PRINT_LOCK);
    shmem_barrier_all();
  }

  // Prints topology in PE order, each PE handing off to the next via a
  // symmetric flag; output per node is truncated to ~20 entries.
  void PrintTopology(bool outgoing = true) {
    Partition<NodeID_> vp(num_nodes_);
    int* PRINTER = (int *) shmem_calloc(1, sizeof(int));        // init 0
    shmem_int_wait_until(PRINTER, SHMEM_CMP_EQ, vp.pe);         // wait until previous PE puts your pe # in PRINTER
    if (outgoing) {
      std::cout << "######################## Graph Topology (Outgoing): PE " << vp.pe << " #######################" << std::endl;
      int j = 0;
      for (NodeID_ i = vp.start; i < vp.end; i++) {
        int k = 0;
        //printf("PE %d | i %d | start: %p => %d | end: %p => %d\n", vp.pe, i, (void*) out_neigh(i).start(), out_neigh(i).start()[0], (void*) out_neigh(i).finish(), out_neigh(i).finish()[0]);
        std::cout << i << ": ";
        for (DestID_ j : out_neigh(i)) {
          std::cout << j << " ";
          if (k > 20)
            break;
          k++;
        }
        //std::cout << std::endl;
        printf("\n");
        if (j > 20)
          break;
        j++;
      }
      if (!(vp.pe == vp.npes-1))
        shmem_int_p(PRINTER, vp.pe+1, vp.pe+1);                 // who's next?
      shmem_barrier_all();
    } else {
      std::cout << "######################## Graph Topology (Incoming): PE " << vp.pe << " #######################" << std::endl;
      int j = 0;
      for (NodeID_ i = vp.start; i < vp.end; i++) {
        int k = 0;
        std::cout << i << ": ";
        for (DestID_ j : in_neigh(i)) {
          std::cout << j << " ";
          if (k > 20)
            break;
          k++;
        }
        //std::cout << std::endl;
        printf("\n");
        if (j > 20)
          break;
        j++;
      }
      if (!(vp.pe == vp.npes-1))
        shmem_int_p(PRINTER, vp.pe+1, vp.pe+1);                 // who's next?
      shmem_barrier_all();
    }
    shmem_free(PRINTER);
  }

  // offsets for given pe start from the pes first elem
  // some unused space - max_width = partition_width + remainder
  static DestID_** GenIndex(const pvector<SGOffset> &offsets, DestID_* neighs, Partition<NodeID_>* p) {
    DestID_** index = (DestID_**) shmem_calloc((p->max_width)+1, sizeof(DestID_*));
    //#pragma omp parallel for
    for (NodeID_ n = p->start; n <= p->end; n++) {
      index[n-p->start] = neighs + offsets[n-p->start];
    }
    return index;
  }

  static DestID_** OLDGenIndex(const pvector<SGOffset> &offsets, DestID_* neighs) {
    NodeID_ length = offsets.size();
    DestID_** index = new DestID_*[length];
    #pragma omp parallel for
    for (NodeID_ n=0; n < length; n++)
      index[n] = neighs + offsets[n];
    return index;
  }

  // Offsets of each local vertex's neighborhood relative to the PE's first
  // neighbor; select in- vs out- arrays with in_graph.
  pvector<SGOffset> VertexOffsets(Partition<NodeID_>* vp, bool in_graph = false) const {
    pvector<SGOffset> offsets(vp->partition_width+1);
    for (NodeID_ n = vp->start; n < vp->end+1; n++)
      if (in_graph)
        offsets[n-vp->start] = in_index_[n-vp->start] - in_index_[0];
      else
        offsets[n-vp->start] = out_index_[n-vp->start] - out_index_[0];
    return offsets;
  }

  Range<NodeID_> vertices() const {
    return Range<NodeID_>(num_nodes());
  }

 public:
  bool directed_;
  int64_t num_nodes_;
  int64_t num_edges_;
  DestID_** out_index_;
  DestID_*  out_neighbors_;
  DestID_** in_index_;
  DestID_*  in_neighbors_;
};

#endif  // GRAPH_H_
core_dtsmlq.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztsmlq.c, normal z -> d, Fri Sep 28 17:38:24 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

#include <omp.h>

/***************************************************************************//**
 *
 * @ingroup core_tsmlq
 *
 *  Overwrites the general complex m1-by-n1 tile A1 and
 *  m2-by-n2 tile A2 with
 *
 *                                side = PlasmaLeft        side = PlasmaRight
 *    trans = PlasmaNoTrans            Q * | A1 |           | A1 A2 | * Q
 *                                         | A2 |
 *
 *    trans = PlasmaTrans          Q^T * | A1 |           | A1 A2 | * Q^T
 *                                         | A2 |
 *
 *  where Q is a complex orthogonal matrix defined as the product of k
 *  elementary reflectors
 *
 *    Q = H(k)^T . . . H(2)^T H(1)^T
 *
 *  as returned by plasma_core_dtslqt.
 *
 *******************************************************************************
 *
 * @param[in] side
 *         - PlasmaLeft  : apply Q or Q^T from the Left;
 *         - PlasmaRight : apply Q or Q^T from the Right.
 *
 * @param[in] trans
 *         - PlasmaNoTrans : Apply Q;
 *         - PlasmaTrans   : Apply Q^T.
 *
 * @param[in] m1
 *         The number of rows of the tile A1. m1 >= 0.
 *
 * @param[in] n1
 *         The number of columns of the tile A1. n1 >= 0.
 *
 * @param[in] m2
 *         The number of rows of the tile A2. m2 >= 0.
 *         m2 = m1 if side == PlasmaRight.
 *
 * @param[in] n2
 *         The number of columns of the tile A2. n2 >= 0.
 *         n2 = n1 if side == PlasmaLeft.
 *
 * @param[in] k
 *         The number of elementary reflectors whose product defines
 *         the matrix Q.
 *
 * @param[in] ib
 *         The inner-blocking size. ib >= 0.
 *
 * @param[in,out] A1
 *         On entry, the m1-by-n1 tile A1.
 *         On exit, A1 is overwritten by the application of Q.
 *
 * @param[in] lda1
 *         The leading dimension of the array A1. lda1 >= max(1,m1).
 *
 * @param[in,out] A2
 *         On entry, the m2-by-n2 tile A2.
 *         On exit, A2 is overwritten by the application of Q.
 *
 * @param[in] lda2
 *         The leading dimension of the tile A2. lda2 >= max(1,m2).
 *
 * @param[in] V
 *         The i-th row must contain the vector which defines the
 *         elementary reflector H(i), for i = 1,2,...,k, as returned by
 *         plasma_core_dtslqt in the first k rows of its array argument V.
 *
 * @param[in] ldv
 *         The leading dimension of the array V. ldv >= max(1,k).
 *
 * @param[in] T
 *         The ib-by-k triangular factor T of the block reflector.
 *         T is upper triangular by block (economic storage);
 *         The rest of the array is not referenced.
 *
 * @param[in] ldt
 *         The leading dimension of the array T. ldt >= ib.
 *
 * @param work
 *         Auxiliary workspace array of length
 *             ldwork-by-m1 if side == PlasmaLeft
 *             ldwork-by-ib if side == PlasmaRight
 *
 * @param[in] ldwork
 *         The leading dimension of the array work.
 *             ldwork >= max(1,ib) if side == PlasmaLeft
 *             ldwork >= max(1,n1) if side == PlasmaRight
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 ******************************************************************************/
__attribute__((weak))
int plasma_core_dtsmlq(plasma_enum_t side, plasma_enum_t trans,
                       int m1, int n1, int m2, int n2, int k, int ib,
                             double *A1, int lda1,
                             double *A2, int lda2,
                       const double *V,  int ldv,
                       const double *T,  int ldt,
                             double *work, int ldwork)
{
    // Check input arguments.
    if (side != PlasmaLeft && side != PlasmaRight) {
        plasma_coreblas_error("illegal value of side");
        return -1;
    }
    if (trans != PlasmaNoTrans && trans != PlasmaTrans) {
        plasma_coreblas_error("illegal value of trans");
        return -2;
    }
    if (m1 < 0) {
        plasma_coreblas_error("illegal value of m1");
        return -3;
    }
    if (n1 < 0) {
        plasma_coreblas_error("illegal value of n1");
        return -4;
    }
    if (m2 < 0 || (m2 != m1 && side == PlasmaRight)) {
        plasma_coreblas_error("illegal value of m2");
        return -5;
    }
    if (n2 < 0 || (n2 != n1 && side == PlasmaLeft)) {
        plasma_coreblas_error("illegal value of n2");
        return -6;
    }
    if (k < 0 ||
        (side == PlasmaLeft  && k > m1 ) ||
        (side == PlasmaRight && k > n1)) {
        plasma_coreblas_error("illegal value of k");
        return -7;
    }
    if (ib < 0) {
        plasma_coreblas_error("illegal value of ib");
        return -8;
    }
    if (A1 == NULL) {
        plasma_coreblas_error("NULL A1");
        return -9;
    }
    if (lda1 < imax(1, m1)) {
        plasma_coreblas_error("illegal value of lda1");
        return -10;
    }
    if (A2 == NULL) {
        plasma_coreblas_error("NULL A2");
        return -11;
    }
    if (lda2 < imax(1, m2)) {
        plasma_coreblas_error("illegal value of lda2");
        return -12;
    }
    if (V == NULL) {
        plasma_coreblas_error("NULL V");
        return -13;
    }
    if (ldv < imax(1, k)) {
        plasma_coreblas_error("illegal value of ldv");
        return -14;
    }
    if (T == NULL) {
        plasma_coreblas_error("NULL T");
        return -15;
    }
    if (ldt < imax(1, ib)) {
        plasma_coreblas_error("illegal value of ldt");
        return -16;
    }
    if (work == NULL) {
        plasma_coreblas_error("NULL work");
        return -17;
    }
    if (ldwork < imax(1, side == PlasmaLeft ? ib : n1)) {
        plasma_coreblas_error("illegal value of ldwork");
        return -18;
    }

    // quick return
    if (m1 == 0 || n1 == 0 || m2 == 0 || n2 == 0 || k == 0 || ib == 0)
        return PlasmaSuccess;

    // Choose the sweep direction over the ib-wide reflector blocks:
    // forward (i1=0, step +ib) or backward (start at last block, step -ib),
    // depending on the side/trans combination, mirroring LAPACK's *unmlq.
    int i1, i3;
    if ((side == PlasmaLeft  && trans == PlasmaNoTrans) ||
        (side == PlasmaRight && trans != PlasmaNoTrans)) {
        i1 = 0;
        i3 = ib;
    }
    else {
        i1 = ((k-1)/ib)*ib;
        i3 = -ib;
    }

    // LQ factors store reflectors row-wise, so the effective trans passed to
    // the block-reflector kernel is the opposite of the requested one.
    if (trans == PlasmaNoTrans)
        trans = PlasmaTrans;
    else
        trans = PlasmaNoTrans;

    for (int i = i1; i > -1 && i < k; i += i3) {
        int kb = imin(ib, k-i);
        int ic = 0;
        int jc = 0;
        int mi = m1;
        int ni = n1;

        if (side == PlasmaLeft) {
            // H or H^T is applied to C(i:m,1:n).
            mi = m1 - i;
            ic = i;
        }
        else {
            // H or H^T is applied to C(1:m,i:n).
            ni = n1 - i;
            jc = i;
        }

        // Apply H or H^T.
        plasma_core_dparfb(side, trans, PlasmaForward, PlasmaRowwise,
                           mi, ni, m2, n2, kb, 0,
                           &A1[lda1*jc+ic], lda1,
                           A2, lda2,
                           &V[i], ldv,
                           &T[ldt*i], ldt,
                           work, ldwork);
    }
    return PlasmaSuccess;
}

/******************************************************************************/
// OpenMP task wrapper: declares tile dependences and dispatches the kernel
// with a per-thread workspace from the shared workspace pool.
void plasma_core_omp_dtsmlq(plasma_enum_t side, plasma_enum_t trans,
                            int m1, int n1, int m2, int n2, int k, int ib,
                                  double *A1, int lda1,
                                  double *A2, int lda2,
                            const double *V, int ldv,
                            const double *T, int ldt,
                            plasma_workspace_t work,
                            plasma_sequence_t *sequence,
                            plasma_request_t *request)
{
    #pragma omp task depend(inout:A1[0:lda1*n1]) \
                     depend(inout:A2[0:lda2*n2]) \
                     depend(in:V[0:ldv*n2]) \
                     depend(in:T[0:ib*k])
    {
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces.
            int tid = omp_get_thread_num();
            double *W = (double*)work.spaces[tid];
            int ldwork = side == PlasmaLeft ? ib : n1; // TODO: double check

            // Call the kernel.
            int info = plasma_core_dtsmlq(side, trans,
                                          m1, n1, m2, n2, k, ib,
                                          A1, lda1,
                                          A2, lda2,
                                          V, ldv,
                                          T, ldt,
                                          W, ldwork);

            if (info != PlasmaSuccess) {
                plasma_error("core_dtsmlq() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
GB_binop__islt_fp32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): because it is generated, any fix belongs in the generator
// template (Source/Template), not in this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__islt_fp32)
// A.*B function (eWiseMult):       GB (_AemultB)
// A.*B function (eWiseMult):       GB (_AemultB_02__islt_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_03__islt_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__islt_fp32)
// A*D function (colscale):         GB (_AxD__islt_fp32)
// D*A function (rowscale):         GB (_DxB__islt_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__islt_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__islt_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__islt_fp32)
// C=scalar+B                       GB (_bind1st__islt_fp32)
// C=scalar+B'                      GB (_bind1st_tran__islt_fp32)
// C=A+scalar                       GB (_bind2nd__islt_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__islt_fp32)

// C type:   float
// A type:   float
// B,b type: float
// BinaryOp: cij = (aij < bij)

// The macros below configure the shared templates (GB_add_template.c etc.)
// for the ISLT operator on float ("is less than", returning 0.0f or 1.0f).

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    float bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x < y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLT || GxB_NO_FP32 || GxB_NO_ISLT_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__islt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__islt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,    // slicing of B's entries across tasks
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__islt_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__islt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__islt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__islt_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__islt_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__islt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__islt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__islt_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__islt_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (NULL if B is not bitmap)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ;
p < anz ; p++) { if (!GBB (Bb, p)) continue ; float bij = Bx [p] ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__islt_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = Ax [p] ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__islt_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__islt_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
graphAdjLinkedList.c
// ----------------------------------------------------------------------------- // // "00_AccelGraph" // // ----------------------------------------------------------------------------- // Copyright (c) 2014-2019 All rights reserved // ----------------------------------------------------------------------------- // Author : Abdullah Mughrabi // Email : atmughra@ncsu.edu||atmughrabi@gmail.com // File : graphAdjLinkedList.c // Create : 2019-06-21 17:15:17 // Revise : 2019-09-28 15:36:13 // Editor : Abdullah Mughrabi // ----------------------------------------------------------------------------- #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <stdint.h> #include <omp.h> #include "timer.h" #include "myMalloc.h" #include "graphConfig.h" #include "edgeList.h" #include "vertex.h" #include "sortRun.h" #include "reorder.h" #include "adjLinkedList.h" #include "graphAdjLinkedList.h" // A utility function that creates a graphAdjLinkedList of V vertices struct GraphAdjLinkedList *graphAdjLinkedListGraphNew(uint32_t V) { struct GraphAdjLinkedList *graphAdjLinkedList = (struct GraphAdjLinkedList *) my_malloc( sizeof(struct GraphAdjLinkedList)); graphAdjLinkedList->num_vertices = V; graphAdjLinkedList->vertices = (struct AdjLinkedList *) my_malloc( V * sizeof(struct AdjLinkedList)); uint32_t i; #pragma omp parallel for for(i = 0; i < V; i++) { graphAdjLinkedList->vertices[i].outNodes = NULL; graphAdjLinkedList->vertices[i].out_degree = 0; #if DIRECTED graphAdjLinkedList->vertices[i].inNodes = NULL; graphAdjLinkedList->vertices[i].in_degree = 0; #endif graphAdjLinkedList->vertices[i].visited = 0; } // printf("\n Success!!! 
V: %d\n ", V); return graphAdjLinkedList; } struct GraphAdjLinkedList *graphAdjLinkedListEdgeListNew(struct EdgeList *edgeList) { struct GraphAdjLinkedList *graphAdjLinkedList = (struct GraphAdjLinkedList *) my_malloc( sizeof(struct GraphAdjLinkedList)); graphAdjLinkedList->num_vertices = edgeList->num_vertices; graphAdjLinkedList->num_edges = edgeList->num_edges; graphAdjLinkedList->avg_degree = edgeList->num_edges / edgeList->num_vertices; graphAdjLinkedList->vertices = (struct AdjLinkedList *) my_malloc( graphAdjLinkedList->num_vertices * sizeof(struct AdjLinkedList)); #if WEIGHTED graphAdjLinkedList->max_weight = edgeList->max_weight; #endif uint32_t i; #pragma omp parallel for for(i = 0; i < graphAdjLinkedList->num_vertices; i++) { graphAdjLinkedList->vertices[i].outNodes = NULL; graphAdjLinkedList->vertices[i].out_degree = 0; #if DIRECTED graphAdjLinkedList->vertices[i].inNodes = NULL; graphAdjLinkedList->vertices[i].in_degree = 0; #endif graphAdjLinkedList->vertices[i].visited = 0; } omp_lock_t *vertex_lock = (omp_lock_t *) my_malloc( graphAdjLinkedList->num_vertices * sizeof(omp_lock_t)); #pragma omp parallel for for (i = 0; i < graphAdjLinkedList->num_vertices; i++) { omp_init_lock(&(vertex_lock[i])); } // #pragma omp parallel for for(i = 0; i < edgeList->num_edges; i++) { adjLinkedListAddEdge(graphAdjLinkedList, edgeList, i, vertex_lock); } #pragma omp parallel for for (i = 0; i < graphAdjLinkedList->num_vertices; i++) { omp_destroy_lock(&(vertex_lock[i])); } free(vertex_lock); return graphAdjLinkedList; } // A utility function to print the adjacency list // representation of graphAdjLinkedList void graphAdjLinkedListPrint(struct GraphAdjLinkedList *graphAdjLinkedList) { printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "GraphAdjLinkedList Properties"); printf(" -----------------------------------------------------\n"); #if WEIGHTED printf("| %-51s | \n", "WEIGHTED"); #else printf("| %-51s | \n", "UN-WEIGHTED"); 
#endif #if DIRECTED printf("| %-51s | \n", "DIRECTED"); #else printf("| %-51s | \n", "UN-DIRECTED"); #endif printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Average Degree (D)"); printf("| %-51u | \n", graphAdjLinkedList->avg_degree); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Number of Vertices (V)"); printf("| %-51u | \n", graphAdjLinkedList->num_vertices); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Number of Edges (E)"); printf("| %-51u | \n", graphAdjLinkedList->num_edges); printf(" -----------------------------------------------------\n"); } void graphAdjLinkedListFree(struct GraphAdjLinkedList *graphAdjLinkedList) { uint32_t v; struct AdjLinkedListNode *pCrawl; struct AdjLinkedListNode *pFree; for (v = 0; v < graphAdjLinkedList->num_vertices; ++v) { pCrawl = graphAdjLinkedList->vertices[v].outNodes; pFree = graphAdjLinkedList->vertices[v].outNodes; while (pCrawl) { pFree = pCrawl; pCrawl = pCrawl->next; if(pFree) free(pFree); } #if DIRECTED pCrawl = graphAdjLinkedList->vertices[v].inNodes; pFree = graphAdjLinkedList->vertices[v].inNodes; while (pCrawl) { pFree = pCrawl; pCrawl = pCrawl->next; if(pFree) free(pFree); } #endif } if(graphAdjLinkedList->vertices) free(graphAdjLinkedList->vertices); if(graphAdjLinkedList) free(graphAdjLinkedList); } void adjLinkedListAddEdge(struct GraphAdjLinkedList *graphAdjLinkedList, struct EdgeList *edge, uint32_t i, omp_lock_t *vertex_lock) { // omp_set_lock(&(vertex_lock[edge->edges_array_src[i]])); // omp_unset_lock((&vertex_lock[edge->edges_array_src[i]])); // Add an edge from src to dest. A new node is // added to the adjacency list of src. 
The node // is added at the begining struct AdjLinkedListNode *newNode = newAdjLinkedListOutNode(edge->edges_array_dest[i]); #if WEIGHTED newNode->weight = edge->edges_array_weight[i]; #endif // omp_set_lock(&(vertex_lock[edge->edges_array_src[i]])); newNode->next = graphAdjLinkedList->vertices[edge->edges_array_src[i]].outNodes; graphAdjLinkedList->vertices[edge->edges_array_src[i]].out_degree++; graphAdjLinkedList->vertices[edge->edges_array_src[i]].visited = 0; graphAdjLinkedList->vertices[edge->edges_array_src[i]].outNodes = newNode; // omp_unset_lock((&vertex_lock[edge->edges_array_src[i]])); // omp_set_lock(&(vertex_lock[edge->edges_array_dest[i]])); // omp_unset_lock((&vertex_lock[edge->edges_array_dest[i]])); // Since graphAdjLinkedList is undirected, add an edge from // dest to src also newNode = newAdjLinkedListInNode(edge->edges_array_src[i]); #if WEIGHTED newNode->weight = edge->edges_array_weight[i]; #endif // omp_set_lock(&(vertex_lock[edge->edges_array_dest[i]])); #if DIRECTED newNode->next = graphAdjLinkedList->vertices[edge->edges_array_dest[i]].inNodes; graphAdjLinkedList->vertices[edge->edges_array_dest[i]].in_degree++; graphAdjLinkedList->vertices[edge->edges_array_dest[i]].visited = 0; graphAdjLinkedList->vertices[edge->edges_array_dest[i]].inNodes = newNode; #else newNode->next = graphAdjLinkedList->vertices[edge->edges_array_dest[i]].outNodes; graphAdjLinkedList->vertices[edge->edges_array_dest[i]].out_degree++; graphAdjLinkedList->vertices[edge->edges_array_dest[i]].visited = 0; graphAdjLinkedList->vertices[edge->edges_array_dest[i]].outNodes = newNode; #endif // omp_unset_lock((&vertex_lock[edge->edges_array_dest[i]])); } void graphAdjLinkedListPrintMessageWithtime(const char *msg, double time) { printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", msg); printf(" -----------------------------------------------------\n"); printf("| %-51f | \n", time); printf(" 
-----------------------------------------------------\n"); } struct GraphAdjLinkedList *graphAdjLinkedListPreProcessingStep (struct Arguments *arguments) { struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); Start(timer); struct EdgeList *edgeList = readEdgeListsbin(arguments->fnameb, 0, arguments->symmetric, arguments->weighted); Stop(timer); // edgeListPrint(edgeList); edgeList = sortRunAlgorithms(edgeList, arguments->sort); if(arguments->dflag) { Start(timer); edgeList = removeDulpicatesSelfLoopEdges(edgeList); Stop(timer); graphCSRPrintMessageWithtime("Removing duplicate edges (Seconds)", Seconds(timer)); } if(arguments->lmode) { edgeList = reorderGraphProcess(edgeList, arguments); edgeList = sortRunAlgorithms(edgeList, arguments->sort); } // add another layer 2 of reordering to test how DBG affect Gorder, or Gorder affect Rabbit order ...etc arguments->lmode = arguments->lmode_l2; if(arguments->lmode) { edgeList = reorderGraphProcess(edgeList, arguments); edgeList = sortRunAlgorithms(edgeList, arguments->sort); } arguments->lmode = arguments->lmode_l3; if(arguments->lmode) { edgeList = reorderGraphProcess(edgeList, arguments); edgeList = sortRunAlgorithms(edgeList, arguments->sort); } if(arguments->mmode) edgeList = maskGraphProcess(edgeList, arguments); graphAdjLinkedListPrintMessageWithtime("Read Edge List From File (Seconds)", Seconds(timer)); Start(timer); struct GraphAdjLinkedList *graphAdjLinkedList = graphAdjLinkedListEdgeListNew(edgeList); Stop(timer); graphAdjLinkedListPrintMessageWithtime("Create Adj Linked List from EdgeList (Seconds)", Seconds(timer)); graphAdjLinkedListPrint(graphAdjLinkedList); freeEdgeList(edgeList); free(timer); return graphAdjLinkedList; }
BlockMatching_private_initializer.h
// Constructors, destructor and reset() overloads for BlockMatching<T>.
// Two families are visible here:
//   * block-grid matching: images + BlockSize (grid of ceil(dim/BlockSize) cells)
//   * region-based matching: images + region maps, _block_size fixed to 1
//     (per-pixel cells), with connected regions and color-quantized images
//     precomputed.
// NOTE(review): the BlockSize guards only reject BlockSize < 0, so
// BlockSize == 0 passes and divides by zero in ceil(width / block_size) —
// confirm whether 0 should also be rejected.

#include <algorithm>
#include <cmath>
#include <iostream>
#include <new>
#include <stdexcept>

// Default constructor: zero-size state, subpixel scale 1.
template <class T>
BlockMatching<T>::BlockMatching(void)
{
	_width = 0;
	_height = 0;
	_block_size = 0;
	_cells_width = 0;
	_cells_height = 0;
	_subpixel_scale = 1;
}

// Two-frame, block-grid constructor (prev + current).
// Throws std::invalid_argument on null/mismatched images,
// std::out_of_range on negative BlockSize.
template <class T>
BlockMatching<T>::BlockMatching(const ImgVector<T>& image_prev, const ImgVector<T>& image_current, const int BlockSize, const int Subpixel_Scale)
{
	_width = 0;
	_height = 0;
	_block_size = 0;
	_cells_width = 0;
	_cells_height = 0;
	_subpixel_scale = 1;
	if (image_prev.isNULL()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : ImgVector<T>& image_prev" << std::endl;
		throw std::invalid_argument("BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : ImgVector<T>& image_prev");
	} else if (image_current.isNULL()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : ImgVector<T>& image_current" << std::endl;
		throw std::invalid_argument("BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : ImgVector<T>& image_current");
	} else if (image_prev.width() != image_current.width()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : width of image_prev and image_current not match" << std::endl;
		throw std::invalid_argument("BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : width of image_prev and image_current not match");
	} else if (image_prev.height() != image_current.height()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : height of image_prev and image_current not match" << std::endl;
		throw std::invalid_argument("BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : height of image_prev and image_current not match");
	} else if (BlockSize < 0) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : BlockSize" << std::endl;
		throw std::out_of_range("BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : BlockSize");
	}
	_width = image_prev.width();
	_height = image_prev.height();
	_block_size = BlockSize;
	// cell grid: ceil(dimension / block size)
	_cells_width = int(ceil(double(_width) / double(_block_size)));
	_cells_height = int(ceil(double(_height) / double(_block_size)));
	_subpixel_scale = Subpixel_Scale;
	_image_prev.copy(image_prev);
	_image_current.copy(image_current);
	// Normalize the image
	image_normalizer();
}

// Three-frame, block-grid constructor (prev + current + next).
// NOTE(review): image_next is never null/dimension checked — confirm intended.
template <class T>
BlockMatching<T>::BlockMatching(const ImgVector<T>& image_prev, const ImgVector<T>& image_current, const ImgVector<T>& image_next, const int BlockSize, const int Subpixel_Scale)
{
	_width = 0;
	_height = 0;
	_block_size = 0;
	_cells_width = 0;
	_cells_height = 0;
	_subpixel_scale = 1;
	if (image_prev.isNULL()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : ImgVector<T>& image_prev" << std::endl;
		throw std::invalid_argument("BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : ImgVector<T>& image_prev");
	} else if (image_current.isNULL()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : ImgVector<T>& image_current" << std::endl;
		throw std::invalid_argument("BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : ImgVector<T>& image_current");
	} else if (image_prev.width() != image_current.width()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : width of image_prev and image_current not match" << std::endl;
		throw std::invalid_argument("BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : width of image_prev and image_current not match");
	} else if (image_prev.height() != image_current.height()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : height of image_prev and image_current not match" << std::endl;
		throw std::invalid_argument("BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : height of image_prev and image_current not match");
	} else if (BlockSize < 0) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : BlockSize" << std::endl;
		throw std::out_of_range("BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : BlockSize");
	}
	_width = image_prev.width();
	_height = image_prev.height();
	_block_size = BlockSize;
	_cells_width = int(ceil(double(_width) / double(_block_size)));
	_cells_height = int(ceil(double(_height) / double(_block_size)));
	_subpixel_scale = Subpixel_Scale;
	_image_prev.copy(image_prev);
	_image_current.copy(image_current);
	_image_next.copy(image_next);
	// Normalize the image
	image_normalizer();
}

// Two-frame, region-based constructor: per-pixel cells (_block_size = 1),
// plus connected-region extraction and color quantization of both frames.
template <class T>
BlockMatching<T>::BlockMatching(const ImgVector<T>& image_prev, const ImgVector<size_t>& region_map_prev, const ImgVector<T>& image_current, const ImgVector<size_t>& region_map_current, const int Subpixel_Scale)
{
	_width = 0;
	_height = 0;
	_block_size = 0;
	_cells_width = 0;
	_cells_height = 0;
	_subpixel_scale = 1;
	if (image_prev.isNULL()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : const ImgVector<T>& image_prev" << std::endl;
		throw std::invalid_argument("const ImgVector<T>& image_prev");
	} else if (image_current.isNULL()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : const ImgVector<T>& image_current" << std::endl;
		throw std::invalid_argument("const ImgVector<T>& image_current");
	} else if (image_prev.width() != image_current.width() || image_prev.height() != image_current.height()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : width or height of image_prev and image_current not match" << std::endl;
		throw std::invalid_argument("width or height of image_prev and image_current not match");
	} else if (region_map_prev.isNULL()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : const ImgVector<int>& region_map_prev" << std::endl;
		throw std::invalid_argument("const ImgVector<int>& region_map_prev");
	} else if (region_map_current.isNULL()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : const ImgVector<int>& region_map_current" << std::endl;
		// NOTE(review): copy-paste — this branch is about region_map_current
		// but throws the "...region_map_prev" message.
		throw std::invalid_argument("const ImgVector<int>& region_map_prev");
	} else if (region_map_prev.width() != image_prev.width() || region_map_prev.height() != image_prev.height()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : width or height of region_map_prev and image_prev not match" << std::endl;
		throw std::invalid_argument("width or height of region_map_prev and image_prev not match");
	} else if (region_map_current.width() != image_current.width() || region_map_current.height() != image_current.height()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : width or height of region_map_current and image_current not match" << std::endl;
		throw std::invalid_argument("width or height of region_map_current and image_current not match");
	}
	_width = image_prev.width();
	_height = image_prev.height();
	_block_size = 1;
	// region mode: one cell per pixel
	_cells_width = _width;
	_cells_height = _height;
	_subpixel_scale = Subpixel_Scale;
	_image_prev.copy(image_prev);
	_image_current.copy(image_current);
	_region_map_prev.copy(region_map_prev);
	_region_map_current.copy(region_map_current);
	// Normalize the image
#if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_BLOCKMATCHING)
	std::cout << " Block Matching : Normalize the input images" << std::endl;
#endif
	image_normalizer();
	// Extract connected regions from region_map
#if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_BLOCKMATCHING)
	std::cout << " Block Matching : Collect connected region from region map" << std::endl;
#endif
	get_connected_regions(&_connected_regions_prev, region_map_prev);
	get_connected_regions(&_connected_regions_current, region_map_current);
	// Get color quantized image
#if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_BLOCKMATCHING)
	std::cout << " Block Matching : Get color quantized image" << std::endl;
#endif
	get_color_quantized_image(&_color_quantized_prev, _image_prev, _connected_regions_prev);
	get_color_quantized_image(&_color_quantized_current, _image_current, _connected_regions_current);
}

// Three-frame, region-based constructor.
// NOTE(review): image_next / region_map_next are never validated.
template <class T>
BlockMatching<T>::BlockMatching(const ImgVector<T>& image_prev, const ImgVector<size_t>& region_map_prev, const ImgVector<T>& image_current, const ImgVector<size_t>& region_map_current, const ImgVector<T>& image_next, const ImgVector<size_t>& region_map_next, const int Subpixel_Scale)
{
	_width = 0;
	_height = 0;
	_block_size = 0;
	_cells_width = 0;
	_cells_height = 0;
	_subpixel_scale = 1;
	if (image_prev.isNULL()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : const ImgVector<T>& image_prev" << std::endl;
		throw std::invalid_argument("const ImgVector<T>& image_prev");
	} else if (image_current.isNULL()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : const ImgVector<T>& image_current" << std::endl;
		throw std::invalid_argument("const ImgVector<T>& image_current");
	} else if (image_prev.width() != image_current.width() || image_prev.height() != image_current.height()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : width or height of image_prev and image_current not match" << std::endl;
		throw std::invalid_argument("width or height of image_prev and image_current not match");
	} else if (region_map_prev.isNULL()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : const ImgVector<int>& region_map_prev" << std::endl;
		throw std::invalid_argument("const ImgVector<int>& region_map_prev");
	} else if (region_map_current.isNULL()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : const ImgVector<int>& region_map_current" << std::endl;
		// NOTE(review): same copy-paste as above — message says region_map_prev.
		throw std::invalid_argument("const ImgVector<int>& region_map_prev");
	} else if (region_map_prev.width() != image_prev.width() || region_map_prev.height() != image_prev.height()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : width or height of region_map_prev and image_prev not match" << std::endl;
		throw std::invalid_argument("width or height of region_map_prev and image_prev not match");
	} else if (region_map_current.width() != image_current.width() || region_map_current.height() != image_current.height()) {
		std::cerr << "BlockMatching<T>::BlockMatching(const ImgVector<T>&, const ImgVector<T>&, const int) : width or height of region_map_current and image_current not match" << std::endl;
		throw std::invalid_argument("width or height of region_map_current and image_current not match");
	}
	_width = image_prev.width();
	_height = image_prev.height();
	_block_size = 1;
	_cells_width = _width;
	_cells_height = _height;
	_subpixel_scale = Subpixel_Scale;
	_image_prev.copy(image_prev);
	_image_current.copy(image_current);
	_image_next.copy(image_next);
	_region_map_prev.copy(region_map_prev);
	_region_map_current.copy(region_map_current);
	_region_map_next.copy(region_map_next);
	// Normalize the image
#if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_BLOCKMATCHING)
	std::cout << " Block Matching : Normalize the input images" << std::endl;
#endif
	image_normalizer();
	// Extract connected regions from region_map
#if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_BLOCKMATCHING)
	std::cout << " Block Matching : Collect connected region from region map" << std::endl;
#endif
	get_connected_regions(&_connected_regions_prev, region_map_prev);
	get_connected_regions(&_connected_regions_current, region_map_current);
	get_connected_regions(&_connected_regions_next, region_map_next);
	// Get color quantized image
#if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_BLOCKMATCHING)
	std::cout << " Block Matching : Get color quantized image" << std::endl;
#endif
	get_color_quantized_image(&_color_quantized_prev, _image_prev, _connected_regions_prev);
	get_color_quantized_image(&_color_quantized_current, _image_current, _connected_regions_current);
	get_color_quantized_image(&_color_quantized_next, _image_next, _connected_regions_next);
}

// Copy constructor: deep-copies all images, region maps, quantized images,
// connected-region lists and motion-vector fields.
template <class T>
BlockMatching<T>::BlockMatching(const BlockMatching& copy)
{
	_width = copy._width;
	_height = copy._height;
	_block_size = copy._block_size;
	_cells_width = copy._cells_width;
	_cells_height = copy._cells_height;
	_subpixel_scale = copy._subpixel_scale;
	_image_prev.copy(copy._image_prev);
	_image_current.copy(copy._image_current);
	_image_next.copy(copy._image_next);
	_region_map_prev.copy(copy._region_map_prev);
	_region_map_current.copy(copy._region_map_current);
	_region_map_next.copy(copy._region_map_next);
	_color_quantized_prev.copy(copy._color_quantized_prev);
	_color_quantized_current.copy(copy._color_quantized_current);
	_color_quantized_next.copy(copy._color_quantized_next);
	_connected_regions_prev.assign(copy._connected_regions_prev.begin(), copy._connected_regions_prev.end());
	_connected_regions_current.assign(copy._connected_regions_current.begin(), copy._connected_regions_current.end());
	_connected_regions_next.assign(copy._connected_regions_next.begin(), copy._connected_regions_next.end());
	_motion_vector_time.copy(copy._motion_vector_time);
	_motion_vector_prev.copy(copy._motion_vector_prev);
	_motion_vector_next.copy(copy._motion_vector_next);
}

// Destructor: all members clean up themselves.
template <class T>
BlockMatching<T>::~BlockMatching(void)
{
}

// Two-frame, block-grid reset: same validation as the matching constructor,
// then clears next-frame / region / motion-vector state.
// NOTE(review): unlike the constructor, image_current.isNULL() is not checked.
template <class T>
void BlockMatching<T>::reset(const ImgVector<T>& image_prev, const ImgVector<T>& image_current, const int BlockSize, const int Subpixel_Scale)
{
	_width = 0;
	_height = 0;
	_block_size = 0;
	_cells_width = 0;
	_cells_height = 0;
	_subpixel_scale = 1;
	if (image_prev.isNULL()) {
		std::cerr << "void BlockMatching<T>::reset(const ImgVector<T>&, const ImgVector<T>&, const int) : const ImgVector<T>& image_prev" << std::endl;
		throw std::invalid_argument("const ImgVector<T>& image_prev");
	} else if (image_prev.width() != image_current.width() || image_prev.height() != image_current.height()) {
		std::cerr << "void BlockMatching<T>::reset(const ImgVector<T>&, const ImgVector<T>&, const int) : const ImgVector<T>& image_prev, const ImgVector<T>& image_prev" << std::endl;
		throw std::invalid_argument("width or height of image_prev and image_current not match");
	} else if (BlockSize < 0) {
		std::cerr << "void BlockMatching<T>::reset(const ImgVector<T>&, const ImgVector<T>&, const int) : const int BlockSize" << std::endl;
		throw std::out_of_range("BlockSize");
	}
	_width = image_prev.width();
	_height = image_prev.height();
	_block_size = BlockSize;
	_cells_width = int(ceil(double(_width) / double(_block_size)));
	_cells_height = int(ceil(double(_height) / double(_block_size)));
	_subpixel_scale = Subpixel_Scale;
	_image_prev.copy(image_prev);
	_image_current.copy(image_current);
	_image_next.clear();
	_region_map_prev.clear();
	_region_map_current.clear();
	_region_map_next.clear();
	_connected_regions_prev.clear();
	_connected_regions_current.clear();
	_connected_regions_next.clear();
	_motion_vector_time.clear();
	_motion_vector_prev.clear();
	_motion_vector_next.clear();
	// Normalize the image
	image_normalizer();
}

// Three-frame, block-grid reset.
template <class T>
void BlockMatching<T>::reset(const ImgVector<T>& image_prev, const ImgVector<T>& image_current, const ImgVector<T>& image_next, const int BlockSize, const int Subpixel_Scale)
{
	_width = 0;
	_height = 0;
	_block_size = 0;
	_cells_width = 0;
	_cells_height = 0;
	_subpixel_scale = 1;
	if (image_prev.isNULL()) {
		std::cerr << "void BlockMatching<T>::reset(const ImgVector<T>&, const ImgVector<T>&, const int) : const ImgVector<T>& image_prev" << std::endl;
		throw std::invalid_argument("const ImgVector<T>& image_prev");
	} else if (image_prev.width() != image_current.width() || image_prev.height() != image_current.height()) {
		std::cerr << "void BlockMatching<T>::reset(const ImgVector<T>&, const ImgVector<T>&, const int) : const ImgVector<T>& image_prev, const ImgVector<T>& image_prev" << std::endl;
		throw std::invalid_argument("width or height of image_prev and image_current not match");
	} else if (BlockSize < 0) {
		std::cerr << "void BlockMatching<T>::reset(const ImgVector<T>&, const ImgVector<T>&, const int) : const int BlockSize" << std::endl;
		throw std::out_of_range("BlockSize");
	}
	_width = image_prev.width();
	_height = image_prev.height();
	_block_size = BlockSize;
	_cells_width = int(ceil(double(_width) / double(_block_size)));
	_cells_height = int(ceil(double(_height) / double(_block_size)));
	_subpixel_scale = Subpixel_Scale;
	_image_prev.copy(image_prev);
	_image_current.copy(image_current);
	_image_next.copy(image_next);
	_region_map_prev.clear();
	_region_map_current.clear();
	_region_map_next.clear();
	_connected_regions_prev.clear();
	_connected_regions_current.clear();
	_connected_regions_next.clear();
	_motion_vector_time.clear();
	_motion_vector_prev.clear();
	_motion_vector_next.clear();
	// Normalize the image
	image_normalizer();
}

// Two-frame, region-based reset (mirrors the region constructor; clears all
// next-frame state afterwards).
template <class T>
void BlockMatching<T>::reset(const ImgVector<T>& image_prev, const ImgVector<size_t>& region_map_prev, const ImgVector<T>& image_current, const ImgVector<size_t>& region_map_current, const int Subpixel_Scale)
{
	_width = 0;
	_height = 0;
	_block_size = 0;
	_cells_width = 0;
	_cells_height = 0;
	_subpixel_scale = 1;
	if (image_prev.isNULL()) {
		std::cerr << "void BlockMatching<T>::reset(const ImgVector<T>&, const ImgVector<T>&, const ImgVector<int>&) : const ImgVector<T>& image_prev" << std::endl;
		throw std::invalid_argument("const ImgVector<T>& image_prev");
	} else if (image_prev.width() != image_current.width() || image_prev.height() != image_current.height()) {
		std::cerr << "void BlockMatching<T>::reset(const ImgVector<T>&, const ImgVector<T>&, const ImgVector<int>&) : const ImgVector<T>& image_prev, const ImgVector<T>& image_current" << std::endl;
		throw std::invalid_argument("width or height of image_prev and image_current not match");
	} else if (region_map_prev.width() != image_prev.width() || region_map_prev.height() != image_prev.height()) {
		std::cerr << "void BlockMatching<T>::reset(const ImgVector<T>&, const ImgVector<T>&, const ImgVector<int>&) : const ImgVector<int>& region_map_prev" << std::endl;
		throw std::invalid_argument("width or height of region_map_prev not match with image_prev");
	} else if (region_map_current.width() != image_current.width() || region_map_current.height() != image_current.height()) {
		std::cerr << "void BlockMatching<T>::reset(const ImgVector<T>&, const ImgVector<T>&, const ImgVector<int>&) : const ImgVector<int>& region_map_current" << std::endl;
		throw std::invalid_argument("width or height of region_map_current not match with image_current");
	}
	_width = image_prev.width();
	_height = image_prev.height();
	_block_size = 1;
	_cells_width = _width;
	_cells_height = _height;
	_subpixel_scale = Subpixel_Scale;
	_image_prev.copy(image_prev);
	_image_current.copy(image_current);
	_image_next.clear();
	_region_map_prev.copy(region_map_prev);
	_region_map_current.copy(region_map_current);
	_region_map_next.clear();
	_motion_vector_time.clear();
	_motion_vector_prev.clear();
	_motion_vector_next.clear();
	// Normalize the image
#if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_BLOCKMATCHING)
	std::cout << " Block Matching : Normalize the input images" << std::endl;
#endif
	image_normalizer();
	// Extract connected regions from region_map
#if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_BLOCKMATCHING)
	std::cout << " Block Matching : Collect connected region from region map" << std::endl;
#endif
	get_connected_regions(&_connected_regions_prev, region_map_prev);
	get_connected_regions(&_connected_regions_current, region_map_current);
	_connected_regions_next.clear();
	// Get color quantized image
#if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_BLOCKMATCHING)
	std::cout << " Block Matching : Get color quantized image" << std::endl;
#endif
	get_color_quantized_image(&_color_quantized_prev, _image_prev, _connected_regions_prev);
	get_color_quantized_image(&_color_quantized_current, _image_current, _connected_regions_current);
	_color_quantized_next.clear();
}

// Three-frame, region-based reset.
// NOTE(review): this overload continues past the end of this chunk
// (the next-frame quantization and closing brace are not visible here).
template <class T>
void BlockMatching<T>::reset(const ImgVector<T>& image_prev, const ImgVector<size_t>& region_map_prev, const ImgVector<T>& image_current, const ImgVector<size_t>& region_map_current, const ImgVector<T>& image_next, const ImgVector<size_t>& region_map_next, const int Subpixel_Scale)
{
	_width = 0;
	_height = 0;
	_block_size = 0;
	_cells_width = 0;
	_cells_height = 0;
	_subpixel_scale = 1;
	if (image_prev.isNULL()) {
		std::cerr << "void BlockMatching<T>::reset(const ImgVector<T>&, const ImgVector<T>&, const ImgVector<int>&) : const ImgVector<T>& image_prev" << std::endl;
		throw std::invalid_argument("const ImgVector<T>& image_prev");
	} else if (image_prev.width() != image_current.width() || image_prev.height() != image_current.height()) {
		std::cerr << "void BlockMatching<T>::reset(const ImgVector<T>&, const ImgVector<T>&, const ImgVector<int>&) : const ImgVector<T>& image_prev, const ImgVector<T>& image_current" << std::endl;
		throw std::invalid_argument("width or height of image_prev and image_current not match");
	} else if (region_map_prev.width() != image_prev.width() || region_map_prev.height() != image_prev.height()) {
		std::cerr << "void BlockMatching<T>::reset(const ImgVector<T>&, const ImgVector<T>&, const ImgVector<int>&) : const ImgVector<int>& region_map_prev" << std::endl;
		throw std::invalid_argument("width or height of region_map_prev not match with image_prev");
	} else if (region_map_current.width() != image_current.width() || region_map_current.height() != image_current.height()) {
		std::cerr << "void BlockMatching<T>::reset(const ImgVector<T>&, const ImgVector<T>&, const ImgVector<int>&) : const ImgVector<int>& region_map_current" << std::endl;
		throw std::invalid_argument("width or height of region_map_current not match with image_current");
	}
	_width = image_prev.width();
	_height = image_prev.height();
	_block_size = 1;
	_cells_width = _width;
	_cells_height = _height;
	_subpixel_scale = Subpixel_Scale;
	_image_prev.copy(image_prev);
	_image_current.copy(image_current);
	_image_next.copy(image_next);
	_region_map_prev.copy(region_map_prev);
	_region_map_current.copy(region_map_current);
	_region_map_next.copy(region_map_next);
	_motion_vector_time.clear();
	_motion_vector_prev.clear();
	_motion_vector_next.clear();
	// Normalize the image
#if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_BLOCKMATCHING)
	std::cout << " Block Matching : Normalize the input images" << std::endl;
#endif
	image_normalizer();
	// Extract connected regions from region_map
#if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_BLOCKMATCHING)
	std::cout << " Block Matching : Collect connected region from region map" << std::endl;
#endif
	get_connected_regions(&_connected_regions_prev, region_map_prev);
	get_connected_regions(&_connected_regions_current, region_map_current);
	get_connected_regions(&_connected_regions_next, region_map_next);
	// Get color quantized image
#if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_BLOCKMATCHING)
	std::cout << " Block Matching : Get color quantized image" << std::endl;
#endif
	get_color_quantized_image(&_color_quantized_prev, _image_prev, _connected_regions_prev);
	get_color_quantized_image(&_color_quantized_current, _image_current, _connected_regions_current);
get_color_quantized_image(&_color_quantized_next, _image_next, _connected_regions_next); } /* Get connected region list * * If region_map[i] < 0 then it means the pixel is neglected. * So usually region_map include only the integer n > 0. */ template <class T> void BlockMatching<T>::get_connected_regions(std::vector<std::vector<VECTOR_2D<int> > >* connected_regions, const ImgVector<size_t>& region_map) { const VECTOR_2D<int> adjacent[8] = { VECTOR_2D<int>(-1, -1), VECTOR_2D<int>(0, -1), VECTOR_2D<int>(1, -1), VECTOR_2D<int>(-1, 0), VECTOR_2D<int>(1, 0), VECTOR_2D<int>(-1, 1), VECTOR_2D<int>(0, 1), VECTOR_2D<int>(1, 1)}; std::list<std::list<VECTOR_2D<int> > > tmp_list; ImgVector<bool> collected(_width, _height, false); // Clear the vector connected_regions->clear(); for (int y = 0; y < _height; y++) { for (int x = 0; x < _width; x++) { if (collected.get(x, y) == false) { size_t num = region_map.get(x, y); collected.at(x, y) = true; tmp_list.push_back(std::list<VECTOR_2D<int> >(0)); // Add new region pixel list VECTOR_2D<int> r(x, y); tmp_list.back().push_back(r); // Add first element for (std::list<VECTOR_2D<int> >::const_iterator ite = tmp_list.back().begin(); ite != tmp_list.back().end(); ++ite) { for (int k = 0; k < 8; k++) { r.x = ite->x + adjacent[k].x; r.y = ite->y + adjacent[k].y; if (0 <= r.x && r.x < _width && 0 <= r.y && r.y < _height && collected.get(r.x, r.y) == false && region_map.get(r.x, r.y) == num) { collected.at(r.x, r.y) = true; tmp_list.back().push_back(r); } } } } } } connected_regions->resize(tmp_list.size()); // Copy extracted connected region to std::vector _connected_regions std::list<std::list<VECTOR_2D<int> > >::iterator ite = tmp_list.begin(); for (size_t n = 0; n < connected_regions->size(); ++ite, n++) { connected_regions->at(n).assign(ite->begin(), ite->end()); } } // ----- Normalizer ----- template <class T> void BlockMatching<T>::image_normalizer(void) { double max_int = _image_prev.max(); if (max_int > 1.0) { _image_prev /= max_int; 
} max_int = _image_current.max(); if (max_int > 1.0) { _image_current /= max_int; } max_int = _image_next.max(); if (max_int > 1.0) { _image_next /= max_int; } } template <> void BlockMatching<ImgClass::RGB>::image_normalizer(void); template <> void BlockMatching<ImgClass::Lab>::image_normalizer(void); // ----- Decrease Color ----- template <class T> void BlockMatching<T>::get_color_quantized_image(ImgVector<T>* decreased_color_image, const ImgVector<T>& image, const std::vector<std::vector<VECTOR_2D<int> > >& connected_regions) { decreased_color_image->reset(_width, _height); unsigned int n; #ifdef _OPENMP #pragma omp parallel for schedule(dynamic) #endif for (n = 0; n < connected_regions.size(); n++) { T sum_color = T(); for (const VECTOR_2D<int>& r : connected_regions[n]) { sum_color += image.get(r.x, r.y); } T mean_color = sum_color / double(connected_regions[n].size()); for (const VECTOR_2D<int>& r : connected_regions[n]) { decreased_color_image->at(r.x, r.y) = mean_color; } } }
DRB095-doall2-taskloop-orig-yes.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. 
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Two-dimensional array computation: Only one loop is associated with omp taskloop. 
   The inner loop's loop iteration variable will be shared if it is shared
   in the enclosing context.
   Data race pairs (we allow multiple ones to preserve the pattern):
     Write_set = {j@69:14, j@69:30}
     Read_set = {j@69:21, j@69:30, j@70:16}
   Any pair from Write_set vs. Write_set and Write_set vs. Read_set
   is a data race pair.
*/

#include <stdio.h>

/* 100x100 work array shared by both loop nests below. */
int a[100][100];

/*
 * Benchmark kernel (Cetus-translated output; #pragma cetus lines are
 * translator annotations, only the omp pragmas affect execution).
 * First nest fills a[i][j] = i + j, second nest increments every element,
 * so a[50][50] ends as 101. The exact pragma pattern is the point of this
 * DataRaceBench case — do not "clean it up".
 */
int main()
{
	int i, j;
	int _ret_val_0;
	/* Nest 1: initialize a[i][j] = i + j (nested parallel regions). */
	#pragma cetus private(i, j)
	#pragma loop name main#0
	#pragma cetus parallel
	#pragma omp parallel for private(i, j)
	for (i=0; i<100; i ++ )
	{
		#pragma cetus private(j)
		#pragma loop name main#0#0
		#pragma cetus parallel
		#pragma omp parallel for private(j)
		for (j=0; j<100; j ++ )
		{
			a[i][j]=(i+j);
		}
	}
	/* Nest 2: increment every element by 1. */
	#pragma cetus private(i, j)
	#pragma loop name main#1
	#pragma cetus parallel
	#pragma omp parallel for private(i, j)
	for (i=0; i<100; i ++ )
	{
		#pragma cetus private(j)
		#pragma loop name main#1#0
		#pragma cetus parallel
		#pragma omp parallel for private(j)
		for (j=0; j<100; j ++ )
		{
			a[i][j]+=1;
		}
	}
	/* Expected output: a[50][50]=101 */
	printf("a[50][50]=%d\n", a[50][50]);
	_ret_val_0=0;
	return _ret_val_0;
}
Mrpt.h
#ifndef CPP_MRPT_H_
#define CPP_MRPT_H_

#include <algorithm>
#include <functional>
#include <numeric>
#include <random>
#include <string>
#include <vector>
#include <cmath>

#include <Eigen/Dense>
#include <Eigen/SparseCore>

// NOTE(review): a using-directive at namespace scope in a header injects all
// of Eigen into every translation unit that includes this file.
using namespace Eigen;

/*
 * Mrpt — approximate nearest-neighbor index built from multiple random
 * projection trees (data stored column-per-sample in an Eigen matrix).
 */
class Mrpt {
 public:
    /**
    * The constructor of the index. The constructor does not actually build
    * the index but that is done by the function 'grow' which has to be called
    * before queries can be made.
    * @param X_ - Pointer to the Eigen::Map which refers to the data matrix.
    *             Ownership stays with the caller; the Map must outlive this index.
    */
    Mrpt(const Map<const MatrixXf> *X_) :
        X(X_),
        n_samples(X_->cols()),
        dim(X_->rows()) {}

    ~Mrpt() {}

    /**
    * The function whose call starts the actual index construction. Initializes
    * arrays to store the tree structures and computes all the projections needed
    * later. Then repeatedly calls method grow_subtree that builds a single RP-tree.
    * @param n_trees_ - The number of trees to be used in the index.
    * @param depth_ - The depth of the trees.
    * @param density_ - Expected ratio of non-zero components in a projection matrix.
    * @param seed - A seed given to a rng when generating random vectors;
    * a default value 0 initializes the rng randomly with rd()
    */
    void grow(int n_trees_, int depth_, float density_, int seed = 0) {
        n_trees = n_trees_;
        depth = depth_;
        density = density_;
        n_pool = n_trees_ * depth_;
        n_array = 1 << (depth_ + 1); // nodes in a complete binary tree of this depth

        // Sparse vs dense projection matrix is chosen once here; query() must agree.
        density < 1 ? build_sparse_random_matrix(seed) : build_dense_random_matrix(seed);
        split_points = MatrixXf(n_array, n_trees);
        tree_leaves = std::vector<std::vector<int>>(n_trees);

        count_first_leaf_indices();

        // Trees are independent: each iteration writes only its own column /
        // its own tree_leaves slot.
        #pragma omp parallel for
        for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
            // All depth projections for this tree computed in one product.
            MatrixXf tree_projections;
            if (density < 1)
                tree_projections.noalias() = sparse_random_matrix.middleRows(n_tree * depth, depth) * *X;
            else
                tree_projections.noalias() = dense_random_matrix.middleRows(n_tree * depth, depth) * *X;

            tree_leaves[n_tree] = std::vector<int>(n_samples);
            std::vector<int> &indices = tree_leaves[n_tree];
            std::iota(indices.begin(), indices.end(), 0);

            grow_subtree(indices.begin(), indices.end(), 0, 0, n_tree, tree_projections);
        }
    }

    /**
    * This function finds the k approximate nearest neighbors of the query object
    * q. The accuracy of the query depends on both the parameters used for index
    * construction and additional parameters given to this function. This
    * function implements two tricks to improve performance. The voting trick
    * interprets each index object in leaves returned by tree traversals as votes,
    * and only performs the final linear search with the 'elect' most voted
    * objects.
    * @param q - The query object whose neighbors the function finds
    * @param k - The number of neighbors the user wants the function to return
    * @param votes_required - The number of votes required for an object to be included in the linear search step
    * @param out - The output buffer for the indices of the k approximate nearest neighbors
    * @param out_distances - Output buffer for distances of the k approximate nearest neighbors (optional parameter)
    * @return
    */
    void query(const Map<VectorXf> &q, int k, int votes_required, int *out,
               float *out_distances = nullptr) const {
        VectorXf projected_query(n_pool);
        if (density < 1)
            projected_query.noalias() = sparse_random_matrix * q;
        else
            projected_query.noalias() = dense_random_matrix * q;

        std::vector<int> found_leaves(n_trees);

        /*
        * The following loops over all trees, and routes the query to exactly one
        * leaf in each. (Each iteration writes a distinct found_leaves slot, so
        * the parallel loop needs no synchronization.)
        */
        #pragma omp parallel for
        for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
            int idx_tree = 0;
            for (int d = 0; d < depth; ++d) {
                const int j = n_tree * depth + d;
                const int idx_left = 2 * idx_tree + 1;
                const int idx_right = idx_left + 1;
                const float split_point = split_points(idx_tree, n_tree);
                if (projected_query(j) <= split_point) {
                    idx_tree = idx_left;
                } else {
                    idx_tree = idx_right;
                }
            }
            // Convert heap-style node index to a 0-based leaf number.
            found_leaves[n_tree] = idx_tree - (1 << depth) + 1;
        }

        int n_elected = 0, max_leaf_size = n_samples / (1 << depth) + 1;
        VectorXi elected(n_trees * max_leaf_size);
        VectorXi votes = VectorXi::Zero(n_samples);

        // count votes
        for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
            int leaf_begin = leaf_first_indices[found_leaves[n_tree]];
            int leaf_end = leaf_first_indices[found_leaves[n_tree] + 1];
            const std::vector<int> &indices = tree_leaves[n_tree];
            for (int i = leaf_begin; i < leaf_end; ++i) {
                int idx = indices[i];
                // Elect exactly once: on the vote that reaches the threshold.
                if (++votes(idx) == votes_required)
                    elected(n_elected++) = idx;
            }
        }

        if (n_elected < k) {
            /*
            * If not enough samples had at least votes_required
            * votes, find the maximum amount of votes needed such
            * that the final search set size has at least k samples
            */
            VectorXf::Index max_index;
            votes.maxCoeff(&max_index);
            int max_votes = votes(max_index);
            VectorXi vote_count = VectorXi::Zero(max_votes + 1);
            for (int i = 0; i < n_samples; ++i)
                vote_count(votes(i))++;

            // Walk the vote histogram downwards until >= k samples accumulate.
            for (int would_elect = 0; max_votes; --max_votes) {
                would_elect += vote_count(max_votes);
                if (would_elect >= k) break;
            }

            // Second condition avoids re-electing samples added above.
            for (int i = 0; i < n_samples; ++i) {
                if (votes(i) >= max_votes && votes(i) < votes_required)
                    elected(n_elected++) = i;
            }
        }

        exact_knn(q, k, elected, n_elected, out, out_distances);
    }

    /**
    * find k nearest neighbors from data for the query point
    * @param q - query point as a vector
    * @param k - number of neighbors searched for
    * @param indices - indices of the points in the original matrix where the search is made
    * @param out - output buffer for the indices of the k approximate nearest neighbors
    * @param out_distances - output buffer for distances of the k approximate nearest neighbors (optional parameter)
    * @return
    */
    void exact_knn(const Map<VectorXf> &q, int k, const VectorXi &indices, int n_elected,
                   int *out, float *out_distances = nullptr) const {
        VectorXf distances(n_elected);

        // Squared distances suffice for ranking; sqrt only on reported values.
        #pragma omp parallel for
        for (int i = 0; i < n_elected; ++i)
            distances(i) = (X->col(indices(i)) - q).squaredNorm();

        if (k == 1) {
            MatrixXf::Index index;
            distances.minCoeff(&index);
            out[0] = indices(index);

            if(out_distances) {
                out_distances[0] = std::sqrt(distances(index));
            }

            return;
        }

        // Partial sort: only the k smallest need to be ordered.
        VectorXi idx(n_elected);
        std::iota(idx.data(), idx.data() + n_elected, 0);
        std::partial_sort(idx.data(), idx.data() + k, idx.data() + n_elected,
                          [&distances](int i1, int i2) {return distances(i1) < distances(i2);});

        for (int i = 0; i < k; ++i)
            out[i] = indices(idx(i));

        if(out_distances) {
            for(int i = 0; i < k; ++i)
                out_distances[i] = std::sqrt(distances(idx(i)));
        }
    }

    /**
    * Saves the index to a file (custom binary format; the data matrix itself
    * is NOT saved — the caller must supply the same data when reloading).
    * NOTE(review): fwrite return values are unchecked, so partial writes go
    * unnoticed; `int sz` narrows a size_t.
    * @param path - Filepath to the output file.
    * @return True if saving succeeded, false otherwise.
    */
    bool save(const char *path) const {
        FILE *fd;
        if ((fd = fopen(path, "wb")) == NULL)
            return false;

        fwrite(&n_trees, sizeof(int), 1, fd);
        fwrite(&depth, sizeof(int), 1, fd);
        fwrite(&density, sizeof(float), 1, fd);

        fwrite(split_points.data(), sizeof(float), n_array * n_trees, fd);

        // save tree leaves
        for (int i = 0; i < n_trees; ++i) {
            int sz = tree_leaves[i].size();
            fwrite(&sz, sizeof(int), 1, fd);
            fwrite(&tree_leaves[i][0], sizeof(int), sz, fd);
        }

        // save random matrix (sparse: one (row, col, value) triplet per non-zero)
        if (density < 1) {
            int non_zeros = sparse_random_matrix.nonZeros();
            fwrite(&non_zeros, sizeof(int), 1, fd);

            for (int k = 0; k < sparse_random_matrix.outerSize(); ++k) {
                for (SparseMatrix<float, RowMajor>::InnerIterator it(sparse_random_matrix, k); it; ++it) {
                    float val = it.value();
                    int row = it.row(), col = it.col();
                    fwrite(&row, sizeof(int), 1, fd);
                    fwrite(&col, sizeof(int), 1, fd);
                    fwrite(&val, sizeof(float), 1, fd);
                }
            }
        } else {
            fwrite(dense_random_matrix.data(), sizeof(float), n_pool * dim, fd);
        }

        fclose(fd);
        return true;
    }

    /**
    * Loads the index from a file written by save().
    * NOTE(review): fread return values are unchecked — a truncated or corrupt
    * file silently yields an inconsistent index; sizes read from the file are
    * trusted unvalidated.
    * @param path - Filepath to the index file.
    * @return True if loading succeeded, false otherwise.
    */
    bool load(const char *path) {
        FILE *fd;
        if ((fd = fopen(path, "rb")) == NULL)
            return false;

        fread(&n_trees, sizeof(int), 1, fd);
        fread(&depth, sizeof(int), 1, fd);
        fread(&density, sizeof(float), 1, fd);

        n_pool = n_trees * depth;
        n_array = 1 << (depth + 1);

        count_first_leaf_indices();

        split_points = MatrixXf(n_array, n_trees);
        fread(split_points.data(), sizeof(float), n_array * n_trees, fd);

        // load tree leaves
        tree_leaves = std::vector<std::vector<int>>(n_trees);
        for (int i = 0; i < n_trees; ++i) {
            int sz;
            fread(&sz, sizeof(int), 1, fd);
            std::vector<int> leaves(sz);
            fread(&leaves[0], sizeof(int), sz, fd);
            tree_leaves[i] = leaves;
        }

        // load random matrix
        if (density < 1) {
            int non_zeros;
            fread(&non_zeros, sizeof(int), 1, fd);

            sparse_random_matrix = SparseMatrix<float>(n_pool, dim);
            std::vector<Triplet<float>> triplets;
            for (int k = 0; k < non_zeros; ++k) {
                int row, col;
                float val;
                fread(&row, sizeof(int), 1, fd);
                fread(&col, sizeof(int), 1, fd);
                fread(&val, sizeof(float), 1, fd);
                triplets.push_back(Triplet<float>(row, col, val));
            }

            sparse_random_matrix.setFromTriplets(triplets.begin(), triplets.end());
            sparse_random_matrix.makeCompressed();
        } else {
            dense_random_matrix = Matrix<float, Dynamic, Dynamic, RowMajor>(n_pool, dim);
            fread(dense_random_matrix.data(), sizeof(float), n_pool * dim, fd);
        }

        fclose(fd);
        return true;
    }

    /**
    * Accessor for split points of trees (for testing purposes)
    * @param tree - index of tree in (0, ... , T-1)
    * @param index - the index of branch in (0, ... , (2^depth) - 1):
    * 0 = root
    * 1 = first branch of first level
    * 2 = second branch of first level
    * 3 = first branch of second level etc.
    * @return split point of index:th branch of tree:th tree
    */
    float get_split_point(int tree, int index) const {
        return split_points(index, tree);
    }

    /**
    * Accessor for point stored in leaves of trees (for testing purposes)
    * @param tree - index of tree in (0, ... T-1)
    * @param leaf - index of leaf in (0, ... , 2^depth)
    * @param index - index of a data point in a leaf
    * @return index of index:th data point in leaf:th leaf of tree:th tree
    */
    int get_leaf_point(int tree, int leaf, int index) const {
        int leaf_begin = leaf_first_indices[leaf];
        return tree_leaves[tree][leaf_begin + index];
    }

    /**
    * Accessor for the number of points in a leaf of a tree (for test purposes)
    * @param tree - index of tree in (0, ... T-1)
    * @param leaf - index of leaf in (0, ... , 2^depth)
    * @return - number of data points in leaf:th leaf of tree:th tree
    */
    int get_leaf_size(int tree, int leaf) const {
        return leaf_first_indices[leaf + 1] - leaf_first_indices[leaf];
    }

    /**
    * @return - number of trees in the index
    */
    int get_n_trees() const {
        return split_points.cols();
    }

    /**
    * @return - depth of trees of index
    */
    int get_depth() const {
        // Derived from whichever projection matrix is in use (rows = n_trees * depth).
        if(sparse_random_matrix.rows() > 0) {
            return sparse_random_matrix.rows() / get_n_trees();
        } else {
            return dense_random_matrix.rows() / get_n_trees();
        }
    }

    /**
    * @return - number of points of the data set from which the index is built
    */
    int get_n_points() const {
        return n_samples;
    }

 private:
    /**
    * Builds a single random projection tree. The tree is constructed by recursively
    * projecting the data on a random vector and splitting into two by the median.
    * @param begin - iterator to the index of the first data point of this branch
    * @param end - iterator to the index of the last data point of this branch
    * @param tree_level - The level in tree where the recursion is at
    * @param i - The index within the tree where we are at
    * @param n_tree - The index of the tree within the index
    * @param tree_projections - Precalculated projection values for the current tree
    */
    void grow_subtree(std::vector<int>::iterator begin, std::vector<int>::iterator end,
                      int tree_level, int i, int n_tree, const MatrixXf &tree_projections) {
        int n = end - begin;
        int idx_left = 2 * i + 1;
        int idx_right = idx_left + 1;

        if (tree_level == depth) return;

        // Median split on the projection values of this level.
        std::nth_element(begin, begin + n/2, end,
                         [&tree_projections, tree_level] (int i1, int i2) {
                            return tree_projections(tree_level, i1) < tree_projections(tree_level, i2);
                          });
        auto mid = end - n/2;

        if(n % 2) {
            // Odd count: the median element itself is the split point.
            split_points(i, n_tree) = tree_projections(tree_level, *(mid - 1));
        } else {
            // Even count: split midway between the two middle projection values.
            auto left_it = std::max_element(begin, mid,
                                            [&tree_projections, tree_level] (int i1, int i2) {
                                              return tree_projections(tree_level, i1) < tree_projections(tree_level, i2);
                                            });
            split_points(i, n_tree) = (tree_projections(tree_level, *mid) +
                                       tree_projections(tree_level, *left_it)) / 2.0;
        }

        grow_subtree(begin, mid, tree_level + 1, idx_left, n_tree, tree_projections);
        grow_subtree(mid, end, tree_level + 1, idx_right, n_tree, tree_projections);
    }

    /**
    * Builds a random sparse matrix for use in random projection. The components of
    * the matrix are drawn from the distribution
    *
    * 0 w.p. 1 - a
    * N(0, 1) w.p. a
    *
    * where a = density.
    *
    * @param seed - A seed given to a rng when generating random vectors;
    * a default value 0 initializes the rng randomly with rd()
    */
    void build_sparse_random_matrix(int seed = 0) {
        sparse_random_matrix = SparseMatrix<float, RowMajor>(n_pool, dim);

        std::random_device rd;
        int s = seed ? seed : rd();
        std::mt19937 gen(s);
        std::uniform_real_distribution<float> uni_dist(0, 1);
        std::normal_distribution<float> norm_dist(0, 1);

        std::vector<Triplet<float>> triplets;
        for (int j = 0; j < n_pool; ++j) {
            for (int i = 0; i < dim; ++i) {
                if (uni_dist(gen) > density) continue;
                triplets.push_back(Triplet<float>(j, i, norm_dist(gen)));
            }
        }

        sparse_random_matrix.setFromTriplets(triplets.begin(), triplets.end());
        sparse_random_matrix.makeCompressed();
    }

    /*
    * Builds a random dense matrix for use in random projection. The components of
    * the matrix are drawn from the standard normal distribution.
    * @param seed - A seed given to a rng when generating random vectors;
    * a default value 0 initializes the rng randomly with rd()
    */
    void build_dense_random_matrix(int seed = 0) {
        dense_random_matrix = Matrix<float, Dynamic, Dynamic, RowMajor>(n_pool, dim);

        std::random_device rd;
        int s = seed ? seed : rd();
        std::mt19937 gen(s);
        std::normal_distribution<float> normal_dist(0, 1);

        std::generate(dense_random_matrix.data(), dense_random_matrix.data() + n_pool * dim,
                      [&normal_dist, &gen] { return normal_dist(gen); });
    }

    /**
    * Computes the leaf sizes of a tree assuming a median split and that
    * when the number points is odd, the extra point is always assigned to
    * to the left branch.
    * @param n - number data points
    * @param level - current level of the tree
    * @param tree_depth - depth of the whole tree
    * @param out_leaf_sizes - vector for the output; after completing
    * the function is a vector of length n containing the leaf sizes
    */
    void count_leaf_sizes(int n, int level, int tree_depth, std::vector<int> &out_leaf_sizes) {
        if(level == tree_depth) {
            out_leaf_sizes.push_back(n);
            return;
        }

        count_leaf_sizes(n - n/2, level + 1, tree_depth, out_leaf_sizes);
        count_leaf_sizes(n/2, level + 1, tree_depth, out_leaf_sizes);
    }

    /**
    * Computes indices of the first elements of leaves in a vector containing
    * all the leaves of a tree concatenated. Assumes that median split is used
    * and when the number points is odd, the extra point is always assigned to
    * to the left branch. (Result is a prefix-sum of leaf sizes, with a final
    * one-past-the-end sentinel.)
    */
    void count_first_leaf_indices() {
        std::vector<int> leaf_sizes;
        count_leaf_sizes(n_samples, 0, depth, leaf_sizes);

        leaf_first_indices = std::vector<int>(leaf_sizes.size() + 1);
        leaf_first_indices[0] = 0;
        // NOTE(review): int i vs size_t size() — signed/unsigned comparison warning.
        for(int i = 0; i < leaf_sizes.size(); ++i)
            leaf_first_indices[i+1] = leaf_first_indices[i] + leaf_sizes[i];
    }

    const Map<const MatrixXf> *X; // the data matrix
    MatrixXf split_points; // all split points in all trees
    std::vector<std::vector<int>> tree_leaves; // contains all leaves of all trees
    Matrix<float, Dynamic, Dynamic, RowMajor> dense_random_matrix; // random vectors needed for all the RP-trees
    SparseMatrix<float, RowMajor> sparse_random_matrix; // random vectors needed for all the RP-trees
    std::vector<int> leaf_first_indices; // first indices of each leaf of tree in tree_leaves

    const int n_samples; // sample size of data
    const int dim; // dimension of data
    int n_trees; // number of RP-trees
    int depth; // depth of an RP-tree with median split
    float density; // expected ratio of non-zero components in a projection matrix
    int n_pool; // amount of random vectors needed for all the RP-trees
    int n_array; // length of the one RP-tree as array
};

#endif // CPP_MRPT_H_
GMS_simd_memops.h
#ifndef __GMS_SIMD_MEMOPS_H__ #define __GMS_SIMD_MEMOPS_H__ namespace file_info { const unsigned int gGMS_SIMD_MEMOPS_MAJOR = 1U; const unsigned int gGMS_SIMD_MEMOPS_MINOR = 1U; const unsigned int gGMS_SIMD_MEMOPS_MICRO = 0U; const unsigned int gGMS_SIMD_MEMOPS_FULLVER = 1000U*gGMS_SIMD_MEMOPS_MAJOR+100U*gGMS_SIMD_MEMOPS_MINOR+10U*gGMS_SIMD_MEMOPS_MICRO; const char * const pgGMS_SIMD_MEMOPS_CREATE_DATE = "26-09-2020 3:47PM +00200 (SAT 26 SEP 2020 GMT+2)"; const char * const pgGMS_SIMD_MEMOPS_BUILD_DATE = __DATE__ ":" __TIME__; const char * const pgGMS_SIMD_MEMOPS_AUTHOR = "Programmer: Bernard Gingold, contact: beniekg@gmail.com"; } #include <immintrin.h> #include <cstdint> #if defined __GNUC__ && !defined __INTEL_COMPILER #include <omp.h> #endif #include "GMS_config.h" #include "GMS_avxvecf32.h" #include "GMS_avxc8f32.h" #include "GMS_avx512vec16.h" #if !defined (MEMMOVE_1ELEM) #define MEMMOVE_1ELEM 1 #endif #if !defined (MEMMOVE_16ELEMS) #define MEMMOVE_16ELEMS 16 #endif #if !defined (MEMMOVE_32ELEMS) #define MEMMOVE_32ELEMS 32 #endif #if !defined (MEMMOVE_64ELEMS) #define MEMMOVE_64ELEMS 64 #endif #if !defined (MEMMOVE_128ELEMS) #define MEMMOVE_128ELEMS 128 #endif #if !defined (MEMMOVE_256ELEMS) #define MEMMOVE_256ELEMS 256 #endif // float type (4-bytes) #define YMM_LEN (8) #define ZMM_LEN (16) #if !defined (PAGE4KiB) #define PAGE4KiB 4096 #endif #if !defined (MAXFLOATSPERPAGE4KiB) #define MAXFLOATSPERPAGE4KiB 1024 #endif #if !defined (min_val) #define min_val(A,B) ((A)<(B)?(A):(B)) #endif namespace gms { namespace common { __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void init_unroll2x_cmplxr4(std::complex<float> * __restrict __ATTR_ALIGN__(32) vc4, const int32_t len, const std::complex<float> val) { #if defined __GNUC__ && !defined __INTEL_COMPILER vc4 = (std::complex<float>*)__builtin_assume_aligned(vc4,32); #pragma omp simd aligned(vc4:32) #elif defined __INTEL_COMPILER __assume_aligned(vc4,32); #pragma vector always #endif for(int32_t 
i = 0; i != len-1; i += 2) { vc4[i+0] = val; vc4[i+1] = val; } } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void init_unroll4x_cmplxr4(std::complex<float> * __restrict __ATTR_ALIGN__(32) vc4, const int32_t len, const std::complex<float> val) { #if defined __GNUC__ && !defined __INTEL_COMPILER vc4 = (std::complex<float>*)__builtin_assume_aligned(vc4,32); #pragma omp simd aligned(vc4:32) #elif defined __INTEL_COMPILER __assume_aligned(vc4,32); #pragma vector always #endif for(int32_t i = 0; i != len-3; i += 4) { vc4[i+0] = val; vc4[i+1] = val; vc4[i+2] = val; vc4[i+3] = val; } } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void init_unroll8x_cmplxr4(std::complex<float> * __restrict __ATTR_ALIGN__(32) vc4, const int32_t len, const std::complex<float> val) { #if defined __GNUC__ && !defined __INTEL_COMPILER vc4 = (std::complex<float>*)__builtin_assume_aligned(vc4,32); #pragma omp simd aligned(vc4:32) #elif defined __INTEL_COMPILER __assume_aligned(vc4,32); #pragma vector always #endif for(int32_t i = 0; i != len-7; i += 8) { vc4[i+0LL] = val; vc4[i+1LL] = val; vc4[i+2LL] = val; vc4[i+3LL] = val; vc4[i+4LL] = val; vc4[i+5LL] = val; vc4[i+6LL] = val; vc4[i+7LL] = val; } } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avxvec8_init_unroll2x(AVXVec8 * __restrict __ATTR_ALIGN__(32) vec8, const int32_t len, const AVXVec8 v) { #if defined __GNUC__ && !defined __INTEL_COMPILER vec8 = (AVXVec8*)__builtin_assume_aligned(vec8,32); #elif defined __INTEL_COMPILER assume_aligned(vec8,32); #endif #if defined __GNUC__ && !defined __INTEL_COMPILER #pragma omp simd aligned(vec8:32) #elif defined __INTEL_COMPILER #pragma vector always #pragma code_align(32) #endif for(int32_t i = 0; i != len-1; i += 2) { vec8[i+0] = v; vec8[i+1] = v; } } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avxvec8_init_unroll4x(AVXVec8 * __restrict __ATTR_ALIGN__(32) vec8, const int32_t len, const AVXVec8 v) 
{ #if defined __GNUC__ vec8 = (AVXVec8*)__builtin_assume_aligned(vec8,32); #elif defined __INTEL_COMPILER assume_aligned(vec8,32); #endif #if defined __GNUC__ && !defined __INTEL_COMPILER #pragma omp simd aligned(vec8:32) #elif defined __INTEL_COMPILER #pragma vector always #pragma code_align(32) #endif for(int32_t i = 0; i != len-3; i += 4) { vec8[i+0] = v; vec8[i+1] = v; vec8[i+2] = v; vec8[i+3] = v; } } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avxvec8_init_unroll8x(AVXVec8 * __restrict __ATTR_ALIGN__(32) vec8, const int32_t len, const AVXVec8 v) { #if defined __GNUC__ && !defined __INTEL_COMPILER vec8 = (AVXVec8*)__builtin_assume_aligned(vec8,32); #elif defined __INTEL_COMPILER __assume_aligned(vec8,32) #endif #if defined __GNUC__ && !defined __INTEL_COMPILER #pragma omp simd aligned(vec8:32) #elif defined __INTEL_COMPILER #pragma vector always #pragma code_align(32) #endif for(int32_t i = 0; i != len-7; i += 8) { vec8[i+0LL] = v; vec8[i+1LL] = v; vec8[i+2LL] = v; vec8[i+3LL] = v; vec8[i+4LL] = v; vec8[i+5LL] = v; vec8[i+6LL] = v; vec8[i+7LL] = v; } } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avxvec8_copy_unroll2x(AVXVec8 * __restrict __ATTR_ALIGN__(32) dst, const AVXVec8 * __restrict __ATTR_ALIGN__(32) src, const int32_t len) { #if defined __GNUC__ && !defined __INTEL_COMPILER dst = (AVXVec8*)__builtin_assume_aligned(dst,32); src = (const AVXVec8*)__builtin_assume_aligned(src,32); #elif defined __INTEL_COMPILER __assume_aligned(dst,32); __assume_aligned(src,32); #endif #if defined __GNUC__ && !defined __INTEL_COMPILER #pragma omp simd aligned(src,dst:32) #elif defined __INTEL_COMPILER #pragma vector always #pragma code_align(32) #endif for(int32_t i = 0; i != len-1; i += 2) { dst[i+0] = src[i+0]; dst[i+1] = src[i+1]; } } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avxvec8_copy_unroll4x(AVXVec8 * __restrict __ATTR_ALIGN__(32) dst, const AVXVec8 * __restrict 
__ATTR_ALIGN__(32) src, const int32_t len) { #if defined __GNUC__ && !defined __INTEL_COMPILER dst = (AVXVec8*)__builtin_assume_aligned(dst,32); src = (const AVXVec8*)__builtin_assume_aligned(src,32); #elif defined __INTEL_COMPILER __assume_aligned(dst,32); __assume_aligned(src,32); #endif #if defined __GNUC__ && !defined __INTEL_COMPILER #pragma omp simd aligned(dst,src:32); #elif defined __INTEL_COMPILER #pragma vector always #pragma code_align(32) #endif for(int32_t i = 0; i != len-3; i += 4) { dst[i+0] = src[i+0]; dst[i+1] = src[i+1]; dst[i+2] = src[i+2]; dst[i+3] = src[i+3]; } } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avxvec8_copy_unroll8x(AVXVec8 * __restrict __ATTR_ALIGN__(32) dst, const AVXVec8 * __restrict __ATTR_ALIGN__(32) src, const int32_t len) { #if defined __GNUC__ && !defined __INTEL_COMPILER dst = (AVXVec8*)__builtin_assume_aligned(dst,32); src = (const AVXVec8*)__builtin_assume_aligned(src,32); #elif defined __INTEL_COMPILER __assume_aligned(dst,32); __assume_aligned(src,32); #endif #if defined __GNUC__ && !defined __INTEL_COMPILER #pragma omp simd aligned(dst,src:32); #elif defined __INTEL_COMPILER #pragma vector always #pragma code_align(32) #endif for(int32_t i = 0; i != len-7; i += 8) { dst[i+0] = src[i+0]; dst[i+1] = src[i+1]; dst[i+2] = src[i+2]; dst[i+3] = src[i+3]; dst[i+4] = src[i+4]; dst[i+5] = src[i+5]; dst[i+6] = src[i+6]; dst[i+7] = src[i+7]; } } #if defined __AVX512F__ __ATTR_ALWAYS_INLINE__ __ATTR_VECTORCALL__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx512vec16_init_unroll2x(AVX512Vec16 * __restrict __ATTR_ALIGN__(64) vec16, const int32_t len, const AVX512Vec16 v) { #if defined __GNUC__ && !defined __INTEL_COMPILER vec16 = (AVX512Vec16*)__builtin_assume_aligned(vec16,64); #elif defined __ICC || defined __INTEL_COMPILER __assume_aligned(vec16,64); #endif #if defined __GNUC__ && !defined __INTEL_COMPILER #pragma omp simd aligned(vec16:64) #elif defined __INTEL_COMPILER #pragma vector always 
#pragma code_align(32) #endif for(int32_t i = 0; i != len-1; i += 2) { vec16[i+0] = v; vec16[i+1] = v; } } __ATTR_ALWAYS_INLINE__ __ATTR_VECTORCALL__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx512vec16_init_unroll4x(AVX512Vec16 * __restrict __ATTR_ALIGN__(64) vec16, const int32_t len, const AVX512Vec16 v) { #if defined __GNUC__ && !defined __INTEL_COMPILER vec16 = (AVX512Vec16*)__builtin_assume_aligned(vec16,64); #elif defined __ICC || defined __INTEL_COMPILER __assume_aligned(vec16,64); #endif #if defined __GNUC__ && !defined __INTEL_COMPILER #pragma omp simd aligned(vec16:64) #elif defined __INTEL_COMPILER #pragma vector always #pragma code_align(32) #endif for(int64_t i = 0; i != len-3; i += 4) { vec16[i+0] = v; vec16[i+1] = v; vec16[i+2] = v; vec16[i+3] = v; } } __ATTR_ALWAYS_INLINE__ __ATTR_VECTORCALL__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx512vec16_init_unroll8x(AVX512Vec16 * __restrict __ATTR_ALIGN__(64) vec16, const int32_t len, const AVX512Vec16 v) { #if defined __GNUC__ && !defined __INTEL_COMPILER vec16 = (AVX512Vec16*)__builtin_assume_aligned(vec16,64); #elif defined __ICC || defined __INTEL_COMPILER __assume_aligned(vec16,64); #endif #if defined __GNUC__ && !defined __INTEL_COMPILER #pragma omp simd aligned(vec16:64) #elif defined __INTEL_COMPILER #pragma vector always #pragma code_align(32) #endif for(int64_t i = 0; i != len-7; i += 8) { vec16[i+0] = v; vec16[i+1] = v; vec16[i+2] = v; vec16[i+3] = v; vec16[i+4] = v; vec16[i+5] = v; vec16[i+6] = v; vec16[i+7] = v; } } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx512vec16_copy_unroll2x(AVX512Vec16 * __restrict __ATTR_ALIGN__(64) dst, const AVX512Vec16 * __restrict __ATTR_ALIGN__(64) src, const int32_t len) { #if defined __GNUC__ && !defined __INTEL_COMPILER dst = (AVXV512ec16*)__builtin_assume_aligned(dst,64); src = (const AVX512Vec16*)__builtin_assume_aligned(src,64); #elif defined __INTEL_COMPILER __assume_aligned(dst,64); 
__assume_aligned(src,64); #endif #if defined __GNUC__ && !defined __INTEL_COMPILER #pragma omp simd aligned(dst,src:64); #elif defined __INTEL_COMPILER #pragma vector always #pragma code_align(32) #endif for(int32_t i = 0; i != len-1; i += 2) { dst[i+0] = src[i+0]; dst[i+1] = src[i+1]; } } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx512vec16_copy_unroll4x(AVX512Vec16 * __restrict __ATTR_ALIGN__(64) dst, const AVX512Vec16 * __restrict __ATTR_ALIGN__(64) src, const int32_t len) { #if defined __GNUC__ && !defined __INTEL_COMPILER dst = (AVXV512ec16*)__builtin_assume_aligned(dst,64); src = (const AVX512Vec16*)__builtin_assume_aligned(src,64); #elif defined __INTEL_COMPILER __assume_aligned(dst,64); __assume_aligned(src,64); #endif #if defined __GNUC__ && !defined __INTEL_COMPILER #pragma omp simd aligned(dst,src:64); #elif defined __INTEL_COMPILER #pragma vector always #pragma code_align(32) #endif for(int32_t i = 0; i != len-3; i += 4) { dst[i+0] = src[i+0]; dst[i+1] = src[i+1]; dst[i+2] = src[i+2]; dst[i+3] = src[i+3]; } } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx512vec16_copy_unroll8x(AVX512Vec16 * __restrict __ATTR_ALIGN__(64) dst, const AVX512Vec16 * __restrict __ATTR_ALIGN__(64) src, const int32_t len) { #if defined __GNUC__ && !defined __INTEL_COMPILER dst = (AVXV512ec16*)__builtin_assume_aligned(dst,64); src = (const AVX512Vec16*)__builtin_assume_aligned(src,64); #elif defined __INTEL_COMPILER __assume_aligned(dst,64); __assume_aligned(src,64); #endif #if defined __GNUC__ && !defined __INTEL_COMPILER #pragma omp simd aligned(dst,src:64); #elif defined __INTEL_COMPILER #pragma vector always #pragma code_align(32) #endif for(int64_t i = 0; i != len-7; i += 8) { dst[i+0] = src[i+0]; dst[i+1] = src[i+1]; dst[i+2] = src[i+2]; dst[i+3] = src[i+3]; dst[i+4] = src[i+4]; dst[i+5] = src[i+5]; dst[i+6] = src[i+6]; dst[i+7] = src[i+7]; } } #endif __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) 
static inline void avxvec8_copy_from_r4(AVXVec8 * __restrict __ATTR_ALIGN__(32) dst, const float * __restrict __ATTR_ALIGN__(32) src, const int32_t len) { #if defined __GNUC__ && !defined __INTEL_COMPILER dst = (AVXVec8*)__builtin_assume_aligned(dst,64); src = (const float*)__builtin_assume_aligned(src,64); #elif defined __INTEL_COMPILER __assume_aligned(dst,64); __assume_aligned(src,64); #endif int32_t j; j = 0; #if defined __ICC || defined __INTEL_COMPILER #pragma code_align(32) #endif for(int32_t i = 0; i != len; i += 8) { const __m256 t = _mm256_load_ps(&src[i]); dst[j].m_v8 = t; ++j; } } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void r4_copy_from_avxvec8(float * __restrict _ATTR_ALIGN__(32) dst, const AVXVec8 * __restrict __ATTR_ALIGN__(64) src, const int32_t len) { #if defined __GNUC__ && !defined __INTEL_COMPILER dst = (AVXVec8*)__builtin_assume_aligned(dst,64); src = (const float*)__builtin_assume_aligned(src,64); #elif defined __INTEL_COMPILER __assume_aligned(dst,64); __assume_aligned(src,64); #endif int32_t j; j = 0; #if defined __ICC || defined __INTEL_COMPILER #pragma code_align(32) #endif for(int32_t i = 0; i != len; i += 8) { const __m256 t = src[j].m_v8; _mm256_store_ps(&dst[i],t); ++j; } } #if defined __AVX512F__ __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void r4_copy_from_avx512vec16(float * __restrict __ATTR_ALIGN__(64) dst, const AVX512Vec16 * __restrict _ATTTR_ALIGN__(64) src, const int32_t len) { #if defined __GNUC__ && !defined __INTEL_COMPILER dst = (AVX512Vec16*)__builtin_assume_aligned(dst,64); src = (const float*)__builtin_assume_aligned(src,64); #elif defined __INTEL_COMPILER __assume_aligned(dst,64); __assume_aligned(src,64); #endif int32_t j; j = 0; #if defined __ICC || defined __INTEL_COMPILER #pragma code_align(32) #endif for(int32_t i = 0; i != len; i += 16) { const __m512 t = src[j].m_v16; _mm512_store_ps(&dst[i],t); ++j; } } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) 
static inline void avx512vec16_copy_from_r4(AVX512Vec16 * __restrict __ATTR_ALIGN__(64) dst, const float * __restrict __ATTR_ALIGN__(64) src, const int32_t len) { #if defined __GNUC__ && !defined __INTEL_COMPILER dst = (AVX512Vec16*)__builtin_assume_aligned(dst,64); src = (const float*)__builtin_assume_aligned(src,64); #elif defined __INTEL_COMPILER __assume_aligned(dst,64); __assume_aligned(src,64); #endif int32_t j; j = 0; #if defined __ICC || defined __INTEL_COMPILER #pragma code_align(32) #endif for(int32_t i = 0; i != len; i += 16){ const __m512 t = _mm512_load_ps(&src[i]); dst[j].m_v16 = t; ++j; } } #endif __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avxc8f32_copy_from_r4(AVXc8f32 * __restrict __ATTR_ALIGN__(32) dst, const float * __restrict __ATTR_ALIGN__(32) src_re, const float * __restrict _ATTR_ALIGN__(32) src_im, const int32_t len) { #if defined __GNUC__ && !defined __INTEL_COMPILER dst = (AVXc8f32*)__builtin_assume_aligned(dst,64); src_re = (const float*)__builtin_assume_aligned(src_re,64); src_im = (const float*)__builtin_assume_aligned(src_im,64); #elif defined __ICC || defined __INTEL_COMPILER __assume_aligned(dst,64); __assume_aligned(src_re,64); __assume_aligned(src_im,64); #endif int32_t j; j = 0; #if defined __ICC || defined __INTEL_COMPILER #pragma code_align(32) #endif for(int64_t i = 0LL; i != len; i += 8LL) { const __m256 tre = _mm256_load_ps(&src_re[i]); dst[j].m_re = tre; const __m256 tim = _mm256_load_ps(&src_im[i]); dst[j].m_im = tim; ++j; } } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void r4_copy_from_avxc8f32(float * __restrict __ATTR_ALIGN__(32) dst_re, float * __restrict __ATTR_ALIGN__(32) dst_im, const AVXc8f32 * __restrict _ATTR_ALIGN__(32) src, const int32_t len) { #if defined __GNUC__ && !defined __INTEL_COMPILER dst_re = (float*)__builtin_assume_aligned(dst_re,64); dst_im = (float*)__builtin_assume_aligned(dst_im,64); src = (const AVXc8f32*)__builtin_assume_aligned(src,64); 
#elif defined __ICC || defined __INTEL_COMPILER __assume_aligned(dst_re,64); __assume_aligned(dst_im,64); __assume_aligned(src,64); #endif int32_t j; j = 0; #if defined __ICC || defined __INTEL_COMPILER #pragma code_align(32) #endif for(int32_t i = 0; i != len; i += 8) { const __m256 tre = src[j].m_re; _mm256_store_ps(&dst_re[i],tre; const __m256 tim = src[j].m_im; _mm256_store_ps(&dst_im[i],tim); } } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx256_init_unroll2x_pd( #if defined __ICC || defined __INTEL_COMPILER double * __restrict v, #elif defined __GNUC__ && !defined __INTEL_COMPILER double * __restrict __ATTR_ALIGN__(64), #endif const int32_t vlen, const double val) { __m256d vec = _mm256_set1_pd(val); int32_t i; #if defined __ICC || defined __INTEL_COMPILER #pragma code_align(32) for (i = 0; i != ROUND_TO_FOUR(vlen, 4); i += 8) { _mm256_storeu_pd(&v[i + 0], vec); _mm256_storeu_pd(&v[i + 4], vec); } #pragma loop_count min(1),avg(2),max(3) for(; i != vlen; ++i) { v[i] = val; } #elif defined __GNUC__ && !defined __INTEL_COMPILER if ((reinterpret_cast<uintptr_t>(v)& 0x1F) != 0ULL) { for (i = 0; i != ROUND_TO_FOUR(vlen, 4); i += 8) { _mm256_storeu_pd(&v[i + 0], vec); _mm256_storeu_pd(&v[i + 4], vec); } } else { v = (double*)__builtin_assume_aligned(v,32); for (i = 0; i != ROUND_TO_FOUR(vlen, 4); i += 8) { _mm256_store_pd(&v[i+0], vec); _mm256_store_pd(&v[i+4], vec); } } for (; i != vlen; ++i) { v[i] = val; } #endif } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx256_init_unroll2x_ps( #if defined __ICC || defined __INTEL_COMPILER float * __restrict v, #elif defined __GNUC__ && !defined __INTEL_COMPILER float * __restrict __ATTR_ALIGN__(32) v, #endif const int32_t vlen, const float val) { __m256 ymm0 = _mm256_set1_ps(val); int32_t i; #if defined __ICC || defined __INTEL_COMPILER #pragma code_align(32) for(i = 0; i != ROUND_TO_EIGHT(vlen,8); i += 16) { _mm256_storeu_ps(&v[i+0], ymm0); 
_mm256_storeu_ps(&v[i+8], ymm0); } #pragma loop_count min(1),avg(4),max(7) for(; i != vlen; ++i) { v[i] = val; } #elif defined __GNUC__ && !defined __INTEL_COMPILER if((reinterpret_cast<uintptr_t>(v) & 0x1F) != 0ULL) { for(i = 0; i != ROUND_TO_EIGHT(vlen,8); i += 16) { _mm256_storeu_ps(&v[i+0], ymm0); _mm256_storeu_ps(&v[i+8], ymm0); } } else { v = (float*)__builtin_assume_aligned(v,32); for(i = 0; i != ROUND_TO_EIGHT(vlen,8); i += 16) { _mm256_store_ps(&v[i+0], ymm0); _mm256_store_ps(&v[i+8], ymm0); } } for(; i != vlen; ++i) { v[i] = val; } #endif } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx256_init_unroll4x_pd( #if defined __ICC || defined __INTEL_COMPILER double * __restrict v, #elif defined __GNUC__ && !defined __INTEL_COMPILER double * __restrict __ATTR_ALIGN__(32) v, #endif const int32_t vlen, const double val) { __m256d vec = _mm256_set1_pd(val); int32_t i; #if defined __ICC || defined __INTEL_COMPILER #pragma code_align(32) for (i = 0; i != ROUND_TO_FOUR(vlen, 4); i += 16) { _mm256_storeu_pd(&v[i + 0], vec); _mm256_storeu_pd(&v[i + 4], vec); _mm256_storeu_pd(&v[i + 8], vec); _mm256_storeu_pd(&v[i + 12], vec); } #pragma loop_count min(1),avg(2),max(3) for (; i != vlen; ++i) { v[i] = val; } #elif defined __GNUC__ && !defined __INTEL_COMPILER if ((reinterpret_cast<uintptr_t>(v)& 0x1F) != 0ULL) { for (i = 0; i != ROUND_TO_FOUR(vlen, 4); i += 16) { _mm256_storeu_pd(&v[i + 0], vec); _mm256_storeu_pd(&v[i + 4], vec); _mm256_storeu_pd(&v[i + 8], vec); _mm256_storeu_pd(&v[i + 12], vec); } } else { v = (double*)__builtin_assume_aligned(v,32); for (i = 0; i != ROUND_TO_FOUR(vlen, 4); i += 16) { _mm256_store_pd(&v[i+0], vec); _mm256_store_pd(&v[i+4], vec); _mm256_store_pd(&v[i+8], vec); _mm256_store_pd(&v[i+12], vec); } } for (; i != vlen; ++i) { v[i] = val; } #endif } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx256_init_unroll4x_ps( #if defined __ICC || defined __INTEL_COMPILER float * __restrict v, 
#elif defined __GNUC__ && !defined __INTEL_COMPILER float * __restrict __ATTR_ALIGN__(32) v, #endif const int32_t vlen, const float val) { __m256 ymm0 = _mm256_set1_ps(val); int32_t i; #if defined __ICC || defined __INTEL_COMPILER #pragma code_align(32) for(i = 0; i != ROUND_TO_EIGHT(vlen,8); i += 32) { _mm256_storeu_ps(&v[i+0], ymm0); _mm256_storeu_ps(&v[i+8], ymm0); _mm256_storeu_ps(&v[i+16], ymm0); _mm256_storeu_ps(&v[i+24], ymm0); } #pragma loop_count min(1),avg(4),max(7) for(; i != vlen; ++i) { v[i] = val; } #elif defined __GNUC__ && !defined __INTEL_COMPILER if((reinterpret_cast<uintptr_t>(v) & 0x1F) != 0ULL) { for(i = 0; i != ROUND_TO_EIGHT(vlen,8); i += 32) { _mm256_storeu_ps(&v[i+0], ymm0); _mm256_storeu_ps(&v[i+8], ymm0); _mm256_storeu_ps(&v[i+16], ymm0); _mm256_storeu_ps(&v[i+24], ymm0); } } else { v = (float*)__builtin_assume_aligned(v,32); for(i = 0; i != ROUND_TO_EIGHT(vlen,8); i += 32) { _mm256_store_ps(&v[i+0], ymm0); _mm256_store_ps(&v[i+8], ymm0); _mm256_store_ps(&v[i+16], ymm0); _mm256_store_ps(&v[i+24], ymm0); } } for(; i != vlen; ++i) { v[i] = val; } #endif } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx256_init_unroll8x_pd( #if defined __ICC || defined __INTEL_COMPILER double * __restrict v, #elif defined __GNUC__ && !defined __INTEL_COMPILER double * __restrict __ATTR_ALIGN__(32) v, #endif const int32_t vlen, const double val) { __m256d vec = _mm256_set1_pd(val); int32_t i; #if defined __ICC || defined __INTEL_COMPILER #pragma code_align(32) for(i = 0; i != ROUND_TO_FOUR(vlen, 4); i += 32) { _mm256_storeu_pd(&v[i + 0], vec); _mm256_storeu_pd(&v[i + 4], vec); _mm256_storeu_pd(&v[i + 8], vec); _mm256_storeu_pd(&v[i + 12], vec); _mm256_storeu_pd(&v[i + 16], vec); _mm256_storeu_pd(&v[i + 20], vec); _mm256_storeu_pd(&v[i + 24], vec); _mm256_storeu_pd(&v[i + 28], vec); } #pragma loop_count min(1),avg(2),max(3) for (; i != vlen; ++i){ v[i] = val; } #elif defined __GNUC__ && !defined __INTEL_COMPILER if 
((reinterpret_cast<uintptr_t>(v)& 0x1F) != 0ULL) { for (i = 0; i != ROUND_TO_FOUR(vlen, 4); i += 32) { _mm256_storeu_pd(&v[i + 0], vec); _mm256_storeu_pd(&v[i + 4], vec); _mm256_storeu_pd(&v[i + 8], vec); _mm256_storeu_pd(&v[i + 12], vec); _mm256_storeu_pd(&v[i + 16], vec); _mm256_storeu_pd(&v[i + 20], vec); _mm256_storeu_pd(&v[i + 24], vec); _mm256_storeu_pd(&v[i + 28], vec); } } else { v = (double*)__builtin_assume_aligned(v,32); for ( i = 0; i != ROUND_TO_FOUR(vlen, 4); i += 32) { _mm256_store_pd(&v[i+0], vec); _mm256_store_pd(&v[i+4], vec); _mm256_store_pd(&v[i+8], vec); _mm256_store_pd(&v[i+12],vec); _mm256_store_pd(&v[i+16],vec); _mm256_store_pd(&v[i+20],vec); _mm256_store_pd(&v[i+24],vec); _mm256_store_pd(&v[i+28],vec); } } for (; i != vlen; ++i){ v[i] = val; } #endif } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx256_init_unroll8x_ps( #if defined __ICC || defined __INTEL_COMPILER float * __restrict v, #elif defined __GNUC__ && !defined __INTEL_COMPILER float * __restrict __ATTR_ALIGN__(32) v, #endif const int32_t vlen, const float val) { __m256 ymm0 = _mm256_set1_ps(val); int32_t i; #if defined __ICC || defined __INTEL_COMPILER #pragma code_align(32) for(i = 0; i != ROUND_TO_EIGHT(vlen,8); i += 64) { _mm256_storeu_ps(&v[i+0], ymm0); _mm256_storeu_ps(&v[i+8], ymm0); _mm256_storeu_ps(&v[i+16], ymm0); _mm256_storeu_ps(&v[i+24], ymm0); _mm256_storeu_ps(&v[i+32], ymm0); _mm256_storeu_ps(&v[i+40], ymm0); _mm256_storeu_ps(&v[i+48], ymm0); _mm256_storeu_ps(&v[i+56], ymm0); } #pragma loop_count min(1),avg(4),max(7) for(; i != vlen; ++i) { v[i] = val; } #elif defined __GNUC__ && !defined __INTEL_COMPILER if((reinterpret_cast<uintptr_t>(v) & 0x1F) != 0ULL) { for(i = 0; i != ROUND_TO_EIGHT(vlen,8); i += 64) { _mm256_storeu_ps(&v[i+0], ymm0); _mm256_storeu_ps(&v[i+8], ymm0); _mm256_storeu_ps(&v[i+16], ymm0); _mm256_storeu_ps(&v[i+24], ymm0); _mm256_storeu_ps(&v[i+32], ymm0); _mm256_storeu_ps(&v[i+40], ymm0); _mm256_storeu_ps(&v[i+48], 
ymm0); _mm256_storeu_ps(&v[i+56], ymm0); } }else { v = (float*)__builtin_assume_aligned(v,32)l for(i = 0; i != ROUND_TO_EIGHT(vlen,8); i += 64) { _mm256_store_ps(&v[i+0], ymm0); _mm256_store_ps(&v[i+8], ymm0); _mm256_store_ps(&v[i+16], ymm0); _mm256_store_ps(&v[i+24], ymm0); _mm256_store_ps(&v[i+32], ymm0); _mm256_store_ps(&v[i+40], ymm0); _mm256_store_ps(&v[i+48], ymm0); _mm256_store_ps(&v[i+56], ymm0); } } for(; i != vlen; ++i) { v[i] = val; } #endif } #if defined __AVX512F__ __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx512_init_unroll2x_pd( #if defined __ICC || defined __INTEL_COMPILER double * __restrict v, #elif defined __GNUC__ && !defined __INTEL_COMPILER double * __restrict __ATTR_ALIGN__(64) v, #endif const int32_t vlen, const double val) { __m512d zmm0 = _mm512_set1_pd(val); int32_t i; #if defined ICC || defined __INTEL_COMPILER #pragma code_align(32) for(i = 0; i != ROUND_TO_EIGHT(vlen,8); i += 16) { _mm512_storeu_pd(&v[i+0], zmm0); _mm512_storeu_pd(&v[i+8], zmm0); } #pragma loop_count min(1),avg(4),max(7) for(; i != vlen; ++i) { v[i] = val; } #elif defined __GNUC__ && !defined __INTEL_COMPILER if((reinterpret_cast<uintptr_t>(v) & 0x3F) != 0ULL) { for(i = 0; i != ROUND_TO_EIGHT(vlen,8); i += 16) { _mm512_storeu_pd(&v[i+0], zmm0); _mm512_storeu_pd(&v[i+8], zmm0); } } else { v = (double*)__builtin_assume_aligned(v,64); for(i = 0; i != ROUND_TO_EIGHT(vlen,8); i += 16) { _mm512_store_pd(&v[i+0], zmm0); _mm512_store_pd(&v[i+8], zmm0); } } for(; i != vlen; ++i) { v[i] = val; } #endif } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx512_init_unroll2x_ps( #if defined __ICC || defined __INTEL_COMPILER float * __restrict v, #elif defined __GNUC__ && !defined __INTEL_COMPILER float * __restrict __ATTR_ALIGN__(64) v, #endif const int32_t vlen, const float val) { __m512 zmm0 = _mm512_set1_ps(val); int32_t i; #if defined __ICC || defined __INTEL_COMPILER #pragma code_align(32) for(i = 0; i != 
ROUND_TO_SIXTEEN(vlen,16); i += 32) { _mm512_storeu_ps(&v[i+0], zmm0); _mm512_storeu_ps(&v[i+16], zmm0); } #pragma loop_count min(1),avg(8),max(1) for(; i != vlen; ++i) { v[i] = val; } #elif defined __GNUC__ && !defined __INTEL_COMPILER if((reinterpret_cast<uintptr_t>(v) & 0x3F) != 0ULL) { for(i = 0; i != ROUND_TO_SIXTEEN(vlen,16); i += 32) { _mm512_storeu_ps(&v[i+0], zmm0); _mm512_storeu_ps(&v[i+16], zmm0); } } else { v = (float*)__builtin_assume_aligned(v,64); for(i = 0; i != ROUND_TO_SIXTEEN(vlen,16); i += 32) { _mm512_store_ps(&v[i+0], zmm0); _mm512_store_ps(&v[i+16], zmm0); } } for(; i != vlen; ++i) { v[i] = val; } #endif } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx512_init_unroll4x_pd( #if defined __ICC || defined __INTEL_COMPILER double * __restrict v, #elif defined __GNUC__ && !defined __INTEL_COMPILER double * __restrict __ATTR_ALIGN__(64) v, #endif const int32_t vlen, const double val) { __m512d zmm0 = _mm512_set1_pd(val); int32_t i; #if defined __ICC || defined __INTEL_COMPILER #pragma code_align(32) for(i = 0; i != ROUND_TO_EIGHT(vlen,8); i += 32) { _mm512_storeu_pd(&v[i+0], zmm0); _mm512_storeu_pd(&v[i+8], zmm0); _mm512_storeu_pd(&v[i+16], zmm0); _mm512_storeu_pd(&v[i+24], zmm0); } #pragma loop_count min(1),avg(4),max(7) for(; i != vlen; ++i) { v[i] = val; } #elif defined __GNUC__ && !defined __INTEL_COMPILER if((reinterpret_cast<uintptr_t>(v) & 0x3F) != 0ULL) { for(i = 0; i != ROUND_TO_EIGHT(vlen,8); i += 32) { _mm512_storeu_pd(&v[i+0], zmm0); _mm512_storeu_pd(&v[i+8], zmm0); _mm512_storeu_pd(&v[i+16], zmm0); _mm512_storeu_pd(&v[i+24], zmm0); } } else { v = (double*)__builtin_assume_aligned(v,64); for(i = 0; i != ROUND_TO_EIGHT(vlen,8); i += 32) { _mm512_store_pd(&v[i+0], zmm0); _mm512_store_pd(&v[i+8], zmm0); _mm512_store_pd(&v[i+16], zmm0); _mm512_store_pd(&v[i+24], zmm0); } } for(; i != vlen; ++i) { v[i] = val; } #endif } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void 
avx512_init_unroll4x_ps( #if defined __ICC || defined __INTEL_COMPILER float * __restrict v, #elif defined __GNUC__ && defined __INTEL_COMPILER float * __restrict __ATTR_ALIIGN__(64) v, #endif const int32_t vlen, const float val) { __m512 zmm0 = _mm512_set1_ps(val); int32_t i; #if defined __ICC || defined __INTEL_COMPILER #pragma code_align(32) for(i = 0; i != ROUND_TO_SIXTEEN(vlen,16); i += 64) { _mm512_storeu_ps(&v[i+0], zmm0); _mm512_storeu_ps(&v[i+16], zmm0); _mm512_storeu_ps(&v[i+32], zmm0); _mm512_storeu_ps(&v[i+48], zmm0); } #pragma loop_count min(1),avg(8),max(15) for(; i != vlen; ++i) { v[i] = val; } #elif defined __GNUC__ && !defined __INTEL_COMPILER if((reinterpret_cast<uintptr_t>(v) & 0x3F) ! = 0ULL) { for(i = 0LL; i != ROUND_TO_SIXTEEN(vlen,16LL); i += 64LL) { _mm512_storeu_ps(&v[i+0LL], zmm0); _mm512_storeu_ps(&v[i+16LL], zmm0); _mm512_storeu_ps(&v[i+32LL], zmm0); _mm512_storeu_ps(&v[i+48LL], zmm0); } } else { v = (float*)__builtin_assume_aligned(v,64); for(i = 0LL; i != ROUND_TO_SIXTEEN(vlen,16LL); i += 64LL) { _mm512_store_ps(&v[i+0LL], zmm0); _mm512_store_ps(&v[i+16LL], zmm0); _mm512_store_ps(&v[i+32LL], zmm0); _mm512_store_ps(&v[i+48LL], zmm0); } } for(; i != vlen; ++i) { v[i] = val; } #endif } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx512_init_unroll8x_pd( #if defined __ICC || defined __INTEL_COMPILER double * __restrict v, #elif defined __GNUC__ && !defined __INTEL_COMPILER double * __restrict __ATTR_ALIGN__(64) v, #endif const int32_t vlen, const double val) { __m512d vec = _mm512_set1_pd(val); int32_t i; #if defined __ICC || defined __INTEL_COMPILER for (i = 0; i != ROUND_TO_EIGHT(vlen, 8); i += 64) { _mm512_storeu_pd(&v[i + 0], vec); _mm512_storeu_pd(&v[i + 8], vec); _mm512_storeu_pd(&v[i + 16], vec); _mm512_storeu_pd(&v[i + 24], vec); _mm512_storeu_pd(&v[i + 32], vec); _mm512_storeu_pd(&v[i + 40], vec); _mm512_storeu_pd(&v[i + 48], vec); _mm512_storeu_pd(&v[i + 56], vec); } #pragma loop_count 
min(1),avg(4),max(7) for (; i != vlen; ++i){ v[i] = val; } #elif defined __GNUC__ && !defined(__INTEL_COMPILER) if ((reinterpret_cast<uintptr_t>(v)& 0x3F) != 0ULL) { for (i = 0; i != ROUND_TO_EIGHT(vlen, 8); i += 64) { _mm512_storeu_pd(&v[i + 0], vec); _mm512_storeu_pd(&v[i + 8], vec); _mm512_storeu_pd(&v[i + 16], vec); _mm512_storeu_pd(&v[i + 24], vec); _mm512_storeu_pd(&v[i + 32], vec); _mm512_storeu_pd(&v[i + 40], vec); _mm512_storeu_pd(&v[i + 48], vec); _mm512_storeu_pd(&v[i + 56], vec); } } else { for (i = 0; i != ROUND_TO_EIGHT(vlen, 8); i += 64) { _mm512_store_pd(&v[i+0],vec); _mm512_store_pd(&v[i+8],vec); _mm512_store_pd(&v[i+16],vec); _mm512_store_pd(&v[i+24],vec); _mm512_store_pd(&v[i+32],vec); _mm512_store_pd(&v[i+40],vec); _mm512_store_pd(&v[i+48],vec); _mm512_store_pd(&v[i+56],vec); } } for (; i != vlen; ++i){ v[i] = val; } #endif } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx512_init_unroll8x_ps( #if defined __ICC || defined __INTEL_COMPILER float * __restrict v, #elif defined __GNUC__ && !defined __INTEL_COMPILER float __restrict __ATTR_ALIGN__(64) v, #endif const int32_t vlen, const float val) { __m512 zmm0 = _mm512_set1_ps(val); int32_t i; #if defined __ICC || defined __INTEL_COMPILER #pragma code_align(32) for(i = 0; i != ROUND_TO_SIXTEEN(vlen,16); i += 128) { _mm512_storeu_ps(&v[i+0], zmm0); _mm512_storeu_ps(&v[i+16], zmm0); _mm512_storeu_ps(&v[i+32], zmm0); _mm512_storeu_ps(&v[i+48], zmm0); _mm512_storeu_ps(&v[i+64], zmm0); _mm512_storeu_ps(&v[i+80], zmm0); _mm512_storeu_ps(&v[i+96], zmm0); _mm512_storeu_ps(&v[i+112], zmm0); } #pragma loop_count min(1),avg(8),max(15) for(; i != vlen; ++i) { v[i] = val; } #elif defined __GNUC__ && !defined __INTEL_COMPILER if((reinterpret_cast<uintptr_t>(v) & 0x3F) != 0ULL) { for(i = 0; i != ROUND_TO_SIXTEEN(vlen,16); i += 128) { _mm512_storeu_ps(&v[i+0], zmm0); _mm512_storeu_ps(&v[i+16], zmm0); _mm512_storeu_ps(&v[i+32], zmm0); _mm512_storeu_ps(&v[i+48], zmm0); 
_mm512_storeu_ps(&v[i+64], zmm0); _mm512_storeu_ps(&v[i+80], zmm0); _mm512_storeu_ps(&v[i+96], zmm0); _mm512_storeu_ps(&v[i+112], zmm0); } } else { v = (float*)__builtin_assume_aligned(v,64); for(i = 0LL; i != ROUND_TO_SIXTEEN(vlen,16LL); i += 128) { _mm512_store_ps(&v[i+0LL], zmm0); _mm512_store_ps(&v[i+16LL], zmm0); _mm512_store_ps(&v[i+32LL], zmm0); _mm512_store_ps(&v[i+48LL], zmm0); _mm512_store_ps(&v[i+64LL], zmm0); _mm512_store_ps(&v[i+80LL], zmm0); _mm512_store_ps(&v[i+96LL], zmm0); _mm512_store_ps(&v[i+112LL], zmm0); } } for(; i != vlen; ++i) { v[i] = val; } #endif } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx512_init_ps( #if defined __ICC || defined __INTEL_COMPILER float * __restrict v, #elif defined __GNUC__ && !defined __INTEL_COMPILER float __restrict __ATTR_ALIGN__(64) v, #endif const int32_t vlen, const float val) { __m512 zmm = _mm512_set1_ps(val); #if defined __ICC || defined __INTEL_COMPILER if(vlen <= MEMMOVE_1ELEM) { return; } else if(vlen <= MEMMOVE_16ELEMS) { _mm512_storeu_ps(&v[0],zmm); return; } else if(vlen <= MEMMOVE_32ELEMS) { _mm512_storeu_ps(&v[0],zmm); _mm512_storeu_ps(&v[1*ZMM_LEN],zmm); return; } else if(vlen <= MEMMOVE_64ELEMS) { _mm512_storeu_ps(&v[0],zmm); _mm512_storeu_ps(&v[1*ZMM_LEN],zmm); _mm512_storeu_ps(&v[2*ZMM_LEN],zmm); _mm512_storeu_ps(&v[3*ZMM_LEN],zmm); return; } else if(vlen <= MEMMOVE_128ELEMS) { _mm512_storeu_ps(&v[0],zmm); _mm512_storeu_ps(&v[1*ZMM_LEN],zmm); _mm512_storeu_ps(&v[2*ZMM_LEN],zmm); _mm512_storeu_ps(&v[3*ZMM_LEN],zmm); _mm512_storeu_ps(&v[4*ZMM_LEN],zmm); _mm512_storeu_ps(&v[5*ZMM_LEN],zmm); _mm512_storeu_ps(&v[6*ZMM_LEN],zmm); _mm512_storeu_ps(&v[7*ZMM_LEN],zmm); return; } else if(vlen <= MEMMOVE_256ELEMS) { _mm512_storeu_ps(&v[0],zmm); _mm512_storeu_ps(&v[1*ZMM_LEN],zmm); _mm512_storeu_ps(&v[2*ZMM_LEN],zmm); _mm512_storeu_ps(&v[3*ZMM_LEN],zmm); _mm512_storeu_ps(&v[4*ZMM_LEN],zmm); _mm512_storeu_ps(&v[5*ZMM_LEN],zmm); _mm512_storeu_ps(&v[6*ZMM_LEN],zmm); 
_mm512_storeu_ps(&v[7*ZMM_LEN],zmm); _mm512_storeu_ps(&v[8*ZMM_LEN],zmm); _mm512_storeu_ps(&v[9*ZMM_LEN],zmm); _mm512_storeu_ps(&v[10*ZMM_LEN],zmm); _mm512_storeu_ps(&v[11*ZMM_LEN],zmm); _mm512_storeu_ps(&v[12*ZMM_LEN],zmm); _mm512_storeu_ps(&v[13*ZMM_LEN],zmm); _mm512_storeu_ps(&v[14*ZMM_LEN],zmm); _mm512_storeu_ps(&v[15*ZMM_LEN],zmm); return; } else if(vlen > MEMMOVE_256ELEMS) { int32_t i; #pragma code_align(32) for(i = 0; i != ROUND_TO_SIXTEEN(vlen,16); i += 128) { _mm512_storeu_ps(&v[i+0], zmm); _mm512_storeu_ps(&v[i+16], zmm); _mm512_storeu_ps(&v[i+32], zmm); _mm512_storeu_ps(&v[i+48], zmm); _mm512_storeu_ps(&v[i+64], zmm); _mm512_storeu_ps(&v[i+80], zmm); _mm512_storeu_ps(&v[i+96], zmm); _mm512_storeu_ps(&v[i+112], zmm); } #pragma loop_count min(1),avg(8),max(15) for(; i != vlen; ++i) { v[i] = val; } return; } #elif defined __GNUC__ && !defined __INTEL_COMPILER if((reinterpret_cast<uintptr_t>(v) & 0x3F) != 0ULL) { if(vlen <= MEMMOVE_1ELEM) { return; } else if(vlen <= MEMMOVE_16ELEMS) { _mm512_storeu_ps(&v[0],zmm); return; } else if(vlen <= MEMMOVE_32ELEMS) { _mm512_storeu_ps(&v[0],zmm); _mm512_storeu_ps(&v[1*ZMM_LEN],zmm); return; } else if(vlen <= MEMMOVE_64ELEMS) { _mm512_storeu_ps(&v[0],zmm); _mm512_storeu_ps(&v[1*ZMM_LEN],zmm); _mm512_storeu_ps(&v[2*ZMM_LEN],zmm); _mm512_storeu_ps(&v[3*ZMM_LEN],zmm); return; } else if(vlen <= MEMMOVE_128ELEMS) { _mm512_storeu_ps(&v[0],zmm); _mm512_storeu_ps(&v[1*ZMM_LEN],zmm); _mm512_storeu_ps(&v[2*ZMM_LEN],zmm); _mm512_storeu_ps(&v[3*ZMM_LEN],zmm); _mm512_storeu_ps(&v[4*ZMM_LEN],zmm); _mm512_storeu_ps(&v[5*ZMM_LEN],zmm); _mm512_storeu_ps(&v[6*ZMM_LEN],zmm); _mm512_storeu_ps(&v[7*ZMM_LEN],zmm); return; } else if(vlen <= MEMMOVE_256ELEMS) { _mm512_storeu_ps(&v[0],zmm); _mm512_storeu_ps(&v[1*ZMM_LEN],zmm); _mm512_storeu_ps(&v[2*ZMM_LEN],zmm); _mm512_storeu_ps(&v[3*ZMM_LEN],zmm); _mm512_storeu_ps(&v[4*ZMM_LEN],zmm); _mm512_storeu_ps(&v[5*ZMM_LEN],zmm); _mm512_storeu_ps(&v[6*ZMM_LEN],zmm); _mm512_storeu_ps(&v[7*ZMM_LEN],zmm); 
_mm512_storeu_ps(&v[8*ZMM_LEN],zmm); _mm512_storeu_ps(&v[9*ZMM_LEN],zmm); _mm512_storeu_ps(&v[10*ZMM_LEN],zmm); _mm512_storeu_ps(&v[11*ZMM_LEN],zmm); _mm512_storeu_ps(&v[12*ZMM_LEN],zmm); _mm512_storeu_ps(&v[13*ZMM_LEN],zmm); _mm512_storeu_ps(&v[14*ZMM_LEN],zmm); _mm512_storeu_ps(&v[15*ZMM_LEN],zmm); return; } else if(vlen > MEMMOVE_256ELEMS) { for(i = 0; i != ROUND_TO_SIXTEEN(vlen,16); i += 128) { _mm512_storeu_ps(&v[i+0], zmm); _mm512_storeu_ps(&v[i+16], zmm); _mm512_storeu_ps(&v[i+32], zmm); _mm512_storeu_ps(&v[i+48], zmm); _mm512_storeu_ps(&v[i+64], zmm); _mm512_storeu_ps(&v[i+80], zmm); _mm512_storeu_ps(&v[i+96], zmm); _mm512_storeu_ps(&v[i+112], zmm); } for(; i != vlen; ++i) { v[i] = val; } return; } else { if(vlen <= MEMMOVE_1ELEM) { return; } else if(vlen <= MEMMOVE_16ELEMS) { _mm512_store_ps(&v[0],zmm); return; } else if(vlen <= MEMMOVE_32ELEMS) { _mm512_store_ps(&v[0],zmm); _mm512_store_ps(&v[1*ZMM_LEN],zmm); return; } else if(vlen <= MEMMOVE_64ELEMS) { _mm512_store_ps(&v[0],zmm); _mm512_store_ps(&v[1*ZMM_LEN],zmm); _mm512_store_ps(&v[2*ZMM_LEN],zmm); _mm512_store_ps(&v[3*ZMM_LEN],zmm); return; } else if(vlen <= MEMMOVE_128ELEMS) { _mm512_store_ps(&v[0],zmm); _mm512_store_ps(&v[1*ZMM_LEN],zmm); _mm512_store_ps(&v[2*ZMM_LEN],zmm); _mm512_store_ps(&v[3*ZMM_LEN],zmm); _mm512_store_ps(&v[4*ZMM_LEN],zmm); _mm512_store_ps(&v[5*ZMM_LEN],zmm); _mm512_store_ps(&v[6*ZMM_LEN],zmm); _mm512_store_ps(&v[7*ZMM_LEN],zmm); return; } else if(vlen <= MEMMOVE_256ELEMS) { _mm512_store_ps(&v[0],zmm); _mm512_store_ps(&v[1*ZMM_LEN],zmm); _mm512_store_ps(&v[2*ZMM_LEN],zmm); _mm512_store_ps(&v[3*ZMM_LEN],zmm); _mm512_store_ps(&v[4*ZMM_LEN],zmm); _mm512_store_ps(&v[5*ZMM_LEN],zmm); _mm512_store_ps(&v[6*ZMM_LEN],zmm); _mm512_store_ps(&v[7*ZMM_LEN],zmm); _mm512_store_ps(&v[8*ZMM_LEN],zmm); _mm512_store_ps(&v[9*ZMM_LEN],zmm); _mm512_store_ps(&v[10*ZMM_LEN],zmm); _mm512_store_ps(&v[11*ZMM_LEN],zmm); _mm512_store_ps(&v[12*ZMM_LEN],zmm); _mm512_store_ps(&v[13*ZMM_LEN],zmm); 
_mm512_store_ps(&v[14*ZMM_LEN],zmm); _mm512_store_ps(&v[15*ZMM_LEN],zmm); return; } else if(vlen > MEMMOVE_256ELEMS) { v = (float*)__builtin_assume_aligned(v,64); for(i = 0; i != ROUND_TO_SIXTEEN(vlen,16); i += 128) { _mm512_store_ps(&v[i+0], zmm); _mm512_store_ps(&v[i+16], zmm); _mm512_store_ps(&v[i+32], zmm); _mm512_store_ps(&v[i+48], zmm); _mm512_store_ps(&v[i+64], zmm); _mm512_store_ps(&v[i+80], zmm); _mm512_store_ps(&v[i+96], zmm); _mm512_store_ps(&v[i+112], zmm); } for(; i != vlen; ++i) { v[i] = val; } return; } #endif } #endif // __AVX512F__ __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx256_init_ps( #if defined __ICC || defined __INTEL_COMPILER float * __restrict v, #elif defined __GNUC__ && !defined __INTEL_COMPILER float __restrict __ATTR_ALIGN__(32) v, #endif const int32_t vlen, const float val) { __m256 ymm = _mm256_set1_ps(val); #if defined __ICC || defined __INTEL_COMPILER if(vlen <= MEMMOVE_1ELEM) { return; } else if(vlen <= MEMMOVE_16ELEMS) { _mm256_storeu_ps(&v[0],ymm); _mm256_storeu_ps(&v[1*YMM_LEN],ymm); return; } else if(vlen <= MEMMOVE_32ELEMS) { _mm256_storeu_ps(&v[0],ymm); _mm256_storeu_ps(&v[1*YMM_LEN],ymm); _mm256_storeu_ps(&v[2*YMM_LEN],ymm) _mm256_storeu_ps(&v[3*YMM_LEN],ymm); return; } else if(vlen <= MEMMOVE_64ELEMS) { _mm256_storeu_ps(&v[0],ymm); _mm256_storeu_ps(&v[1*YMM_LEN],ymm); _mm256_storeu_ps(&v[2*YMM_LEN],ymm) _mm256_storeu_ps(&v[3*YMM_LEN],ymm); _mm256_storeu_ps(&v[4*YMM_LEN],ymm); _mm256_storeu_ps(&v[5*YMM_LEN],ymm); _mm256_storeu_ps(&v[6*YMM_LEN],ymm); _mm256_storeu_ps(&v[7*YMM_LEN],ymm); return; } else if(vlen <= MEMMOVE_128ELEMS) { _mm256_storeu_ps(&v[0],ymm); _mm256_storeu_ps(&v[1*YMM_LEN],ymm); _mm256_storeu_ps(&v[2*YMM_LEN],ymm) _mm256_storeu_ps(&v[3*YMM_LEN],ymm); _mm256_storeu_ps(&v[4*YMM_LEN],ymm); _mm256_storeu_ps(&v[5*YMM_LEN],ymm); _mm256_storeu_ps(&v[6*YMM_LEN],ymm); _mm256_storeu_ps(&v[7*YMM_LEN],ymm); _mm256_storeu_ps(&v[8*YMM_LEN],ymm); _mm256_storeu_ps(&v[9*YMM_LEN],ymm); 
_mm256_storeu_ps(&v[10*YMM_LEN],ymm); _mm256_storeu_ps(&v[11*YMM_LEN],ymm); _mm256_storeu_ps(&v[12*YMM_LEN],ymm); _mm256_storeu_ps(&v[13*YMM_LEN],ymm); _mm256_storeu_ps(&v[14*YMM_LEN],ymm); _mm256_storeu_ps(&v[15*YMM_LEN],ymm); return; } else if(vlen > MEMMOVE_128ELEMS) { int32_t i; #pragma code_align(32) for(i = 0; i != ROUND_TO_EIGHT(vlen,8); i += 64) { _mm256_storeu_ps(&v[i+0], ymm0); _mm256_storeu_ps(&v[i+8], ymm0); _mm256_storeu_ps(&v[i+16], ymm0); _mm256_storeu_ps(&v[i+24], ymm0); _mm256_storeu_ps(&v[i+32], ymm0); _mm256_storeu_ps(&v[i+40], ymm0); _mm256_storeu_ps(&v[i+48], ymm0); _mm256_storeu_ps(&v[i+56], ymm0); } #pragma loop_count min(1),avg(4),max(7) for(; i != vlen; ++i) { v[i] = val; } } #elif defined __GNUC__ && !defined __INTEL_COMPILER if((reinterpret_cast<uintptr_t>(v) & 0x1F) != 0ULL) { if(vlen <= MEMMOVE_1ELEM) { return; } else if(vlen <= MEMMOVE_16ELEMS) { _mm256_storeu_ps(&v[0],ymm); _mm256_storeu_ps(&v[1*YMM_LEN],ymm); return; } else if(vlen <= MEMMOVE_32ELEMS) { _mm256_storeu_ps(&v[0],ymm); _mm256_storeu_ps(&v[1*YMM_LEN],ymm); _mm256_storeu_ps(&v[2*YMM_LEN],ymm) _mm256_storeu_ps(&v[3*YMM_LEN],ymm); return; } else if(vlen <= MEMMOVE_64ELEMS) { _mm256_storeu_ps(&v[0],ymm); _mm256_storeu_ps(&v[1*YMM_LEN],ymm); _mm256_storeu_ps(&v[2*YMM_LEN],ymm) _mm256_storeu_ps(&v[3*YMM_LEN],ymm); _mm256_storeu_ps(&v[4*YMM_LEN],ymm); _mm256_storeu_ps(&v[5*YMM_LEN],ymm); _mm256_storeu_ps(&v[6*YMM_LEN],ymm); _mm256_storeu_ps(&v[7*YMM_LEN],ymm); return; } else if(vlen <= MEMMOVE_128ELEMS) { _mm256_storeu_ps(&v[0],ymm); _mm256_storeu_ps(&v[1*YMM_LEN],ymm); _mm256_storeu_ps(&v[2*YMM_LEN],ymm) _mm256_storeu_ps(&v[3*YMM_LEN],ymm); _mm256_storeu_ps(&v[4*YMM_LEN],ymm); _mm256_storeu_ps(&v[5*YMM_LEN],ymm); _mm256_storeu_ps(&v[6*YMM_LEN],ymm); _mm256_storeu_ps(&v[7*YMM_LEN],ymm); _mm256_storeu_ps(&v[8*YMM_LEN],ymm); _mm256_storeu_ps(&v[9*YMM_LEN],ymm); _mm256_storeu_ps(&v[10*YMM_LEN],ymm); _mm256_storeu_ps(&v[11*YMM_LEN],ymm); _mm256_storeu_ps(&v[12*YMM_LEN],ymm); 
_mm256_storeu_ps(&v[13*YMM_LEN],ymm); _mm256_storeu_ps(&v[14*YMM_LEN],ymm); _mm256_storeu_ps(&v[15*YMM_LEN],ymm); return; } else if(vlen > MEMMOVE_128ELEMS) { int32_t i; for(i = 0; i != ROUND_TO_EIGHT(vlen,8); i += 64) { _mm256_storeu_ps(&v[i+0], ymm0); _mm256_storeu_ps(&v[i+8], ymm0); _mm256_storeu_ps(&v[i+16], ymm0); _mm256_storeu_ps(&v[i+24], ymm0); _mm256_storeu_ps(&v[i+32], ymm0); _mm256_storeu_ps(&v[i+40], ymm0); _mm256_storeu_ps(&v[i+48], ymm0); _mm256_storeu_ps(&v[i+56], ymm0); } for(; i != vlen; ++i) { v[i] = val; } }else { if(vlen <= MEMMOVE_1ELEM) { return; } else if(vlen <= MEMMOVE_16ELEMS) { _mm256_store_ps(&v[0],ymm); _mm256_store_ps(&v[1*YMM_LEN],ymm); return; } else if(vlen <= MEMMOVE_32ELEMS) { _mm256_store_ps(&v[0],ymm); _mm256_store_ps(&v[1*YMM_LEN],ymm); _mm256_store_ps(&v[2*YMM_LEN],ymm) _mm256_store_ps(&v[3*YMM_LEN],ymm); return; } else if(vlen <= MEMMOVE_64ELEMS) { _mm256_store_ps(&v[0],ymm); _mm256_store_ps(&v[1*YMM_LEN],ymm); _mm256_store_ps(&v[2*YMM_LEN],ymm) _mm256_store_ps(&v[3*YMM_LEN],ymm); _mm256_store_ps(&v[4*YMM_LEN],ymm); _mm256_store_ps(&v[5*YMM_LEN],ymm); _mm256_store_ps(&v[6*YMM_LEN],ymm); _mm256_store_ps(&v[7*YMM_LEN],ymm); return; } else if(vlen <= MEMMOVE_128ELEMS) { _mm256_store_ps(&v[0],ymm); _mm256_store_ps(&v[1*YMM_LEN],ymm); _mm256_store_ps(&v[2*YMM_LEN],ymm) _mm256_store_ps(&v[3*YMM_LEN],ymm); _mm256_store_ps(&v[4*YMM_LEN],ymm); _mm256_store_ps(&v[5*YMM_LEN],ymm); _mm256_store_ps(&v[6*YMM_LEN],ymm); _mm256_store_ps(&v[7*YMM_LEN],ymm); _mm256_store_ps(&v[8*YMM_LEN],ymm); _mm256_store_ps(&v[9*YMM_LEN],ymm); _mm256_store_ps(&v[10*YMM_LEN],ymm); _mm256_store_ps(&v[11*YMM_LEN],ymm); _mm256_store_ps(&v[12*YMM_LEN],ymm); _mm256_store_ps(&v[13*YMM_LEN],ymm); _mm256_store_ps(&v[14*YMM_LEN],ymm); _mm256_store_ps(&v[15*YMM_LEN],ymm); return; } else if(vlen > MEMMOVE_128ELEMS) { v = (float*)__builtin_assume_aligned(v,32); int32_t i; for(i = 0; i != ROUND_TO_EIGHT(vlen,8); i += 64) { _mm256_store_ps(&v[i+0], ymm0); 
_mm256_store_ps(&v[i+8], ymm0); _mm256_store_ps(&v[i+16], ymm0); _mm256_store_ps(&v[i+24], ymm0); _mm256_store_ps(&v[i+32], ymm0); _mm256_store_ps(&v[i+40], ymm0); _mm256_store_ps(&v[i+48], ymm0); _mm256_store_ps(&v[i+56], ymm0); } for(; i != vlen; ++i) { v[i] = val; } } #endif } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx256_memcpy_ps( #if defined __ICC || defined __INTEL_COMPILER float * __restrict dst, const float * __restrict src, #elif defined __GNUC__ && !defined __INTEL_COMPILER float * __restrict __ATTR_ALIGN__(32) dst, const float * __restrict __ATTR_ALIGN__(32) src, #endif const int32_t len) { #if defined __ICC || defined __INTEL_COMPILER if(len <= MEMMOVE_1ELEM) { return; } else if(len <= MEMMOVE_16ELEMS) { const __m256 ymm0 = _mm256_loadu_ps(&src[0]); _mm256_storeu_ps(&dst[0],ymm0); const __m256 ymm1 = _mm256_loadu_ps(&src[1*YMM_LEN]); _mm256_storeu_ps(&dst[1*YMM_LEN],ymm1); return; } else if(len <= MEMMOVE_32ELEMS) { const __m256 ymm0 = _mm256_loadu_ps(&src[0]); _mm256_storeu_ps(&dst[0],ymm0); const __m256 ymm1 = _mm256_loadu_ps(&src[1*YMM_LEN]); _mm256_storeu_ps(&dst[1*YMM_LEN],ymm1); const __m256 ymm2 = _mm256_loadu_ps(&src[2*YMM_LEN]); _mm256_storeu_ps(&dst[2*YMM_LEN],ymm2); const _mm256 ymm3 = _mm256_loadu_ps(&src[3*YMM_LEN]); _mm256_storeu_ps(&dst[3*YMM_LEN],ymm3); return; } else if(len <= MEMMOVE_64ELEMS) { const __m256 ymm0 = _mm256_loadu_ps(&src[0]); _mm256_storeu_ps(&dst[0],ymm0); const __m256 ymm1 = _mm256_loadu_ps(&src[1*YMM_LEN]); _mm256_storeu_ps(&dst[1*YMM_LEN],ymm1); const __m256 ymm2 = _mm256_loadu_ps(&src[2*YMM_LEN]); _mm256_storeu_ps(&dst[2*YMM_LEN],ymm2); const _mm256 ymm3 = _mm256_loadu_ps(&src[3*YMM_LEN]); _mm256_storeu_ps(&dst[3*YMM_LEN],ymm3); const __m256 ymm4 = _mm256_loadu_ps(&src[4*YMM_LEN]); _mm256_storeu_ps(&dst[4*YMM_LEN],ymm4); const __m256 ymm5 = _mm256_loadu_ps(&src[5*YMM_LEN]); _mm256_storeu_ps(&dst[5*YMM_LEN],ymm5); const __m256 ymm6 = _mm256_loadu_ps(&src[6*YMM_LEN]); 
_mm256_storeu_ps(&dst[6*YMM_LEN],ymm6); const __m256 ymm7 = _mm256_loadu_ps(&src[7*YMM_LEN]); _mm256_storeu_ps(&dst[7*YMM_LEN],ymm7); return; } else if(len <= MEMMOVE_128ELEMS) { #if (GMS_MAN_PREFETCH) == 1 _mm_prefetch((const char*)&src[0], _MM_HINT_T0); _mm_prefetch((const char*)&src[2*YMM_LEN],_MM_HINT_T0); _mm_prefetch((const char*)&src[4*YMM_LEN],_MM_HINT_T0); _mm_prefetch((const char*)&src[6*YMM_LEN],_MM_HINT_T0); _mm_prefetch((const char*)&src[8*YMM_LEN],_MM_HINT_T0); #endif const __m256 ymm0 = _mm256_loadu_ps(&src[0]); _mm256_storeu_ps(&dst[0],ymm0); const __m256 ymm1 = _mm256_loadu_ps(&src[1*YMM_LEN]); _mm256_storeu_ps(&dst[1*YMM_LEN],ymm1); const __m256 ymm2 = _mm256_loadu_ps(&src[2*YMM_LEN]); _mm256_storeu_ps(&dst[2*YMM_LEN],ymm2); const _mm256 ymm3 = _mm256_loadu_ps(&src[3*YMM_LEN]); _mm256_storeu_ps(&dst[3*YMM_LEN],ymm3); const __m256 ymm4 = _mm256_loadu_ps(&src[4*YMM_LEN]); _mm256_storeu_ps(&dst[4*YMM_LEN],ymm4); const __m256 ymm5 = _mm256_loadu_ps(&src[5*YMM_LEN]); _mm256_storeu_ps(&dst[5*YMM_LEN],ymm5); const __m256 ymm6 = _mm256_loadu_ps(&src[6*YMM_LEN]); _mm256_storeu_ps(&dst[6*YMM_LEN],ymm6); const __m256 ymm7 = _mm256_loadu_ps(&src[7*YMM_LEN]); _mm256_storeu_ps(&dst[7*YMM_LEN],ymm7); const __m256 ymm8 = _mm256_loadu_ps(&src[8*YMM_LEN]); _mm256_storeu_ps(&dst[8*YMM_LEN],ymm8); const __m256 ymm9 = _mm256_loadu_ps(&src[9*YMM_LEN]); _mm256_storeu_ps(&dst[9*YMM_LEN],ymm9); const __m256 ymm10 = _mm256_loadu_ps(&src[10*YMM_LEN]); _mm256_storeu_ps(&dst[10*YMM_LEN],ymm10); const __m256 ymm11 = _mm256_loadu_ps(&src[11*YMM_LEN]); _mm256_storeu_ps(&dst[11*YMM_LEN],ymm11); const __m256 ymm12 = _mm256_loadu_ps(&src[12*YMM_LEN]); _mm256_storeu_ps(&dst[12*YMM_LEN],ymm12); const __m256 ymm13 = _mm256_loadu_ps(&src[13*YMM_LEN]); _mm256_storeu_ps(&dst[13*YMM_LEN],ymm13); const __m256 ymm14 = _mm256_loadu_ps(&src[14*YMM_LEN]); _mm256_storeu_ps(&dst[14*YMM_LEN],ymm14); const __m256 ymm15 = _mm256_loadu_ps(&src[15*YMM_LEN]); _mm256_storeu_ps(&dst[15*YMM_LEN],ymm155); 
return; } else if(len > MEMMOVE_128ELEMS) { int32_t i; #pragma code_align(32) for(i = 0; i != ROUND_TO_EIGHT(len,8); i += 64) { #if (GMS_MAN_PREFETCH) == 1 _mm_prefetch((const char*)&src[i+0], _MM_HINT_T0); _mm_prefetch((const char*)&src[i+2*YMM_LEN],_MM_HINT_T0); _mm_prefetch((const char*)&src[i+4*YMM_LEN],_MM_HINT_T0); #endif const __m256 ymm0 = _mm256_loadu_ps(&src[i+0]); _mm256_storeu_ps(&dst[i+0],ymm0); const __m256 ymm1 = _mm256_loadu_ps(&src[i+1*YMM_LEN]); _mm256_storeu_ps(&dst[i+1*YMM_LEN],ymm1); const __m256 ymm2 = _mm256_loadu_ps(&src[i+2*YMM_LEN]); _mm256_storeu_ps(&dst[i+2*YMM_LEN],ymm2); const _mm256 ymm3 = _mm256_loadu_ps(&src[i+3*YMM_LEN]); _mm256_storeu_ps(&dst[i+3*YMM_LEN],ymm3); const __m256 ymm4 = _mm256_loadu_ps(&src[i+4*YMM_LEN]); _mm256_storeu_ps(&dst[i+4*YMM_LEN],ymm4); const __m256 ymm5 = _mm256_loadu_ps(&src[i+5*YMM_LEN]); _mm256_storeu_ps(&dst[i+5*YMM_LEN],ymm5); const __m256 ymm6 = _mm256_loadu_ps(&src[i+6*YMM_LEN]); _mm256_storeu_ps(&dst[i+6*YMM_LEN],ymm6); const __m256 ymm7 = _mm256_loadu_ps(&src[i+7*YMM_LEN]); _mm256_storeu_ps(&dst[i+7*YMM_LEN],ymm7); } #pragma loop_count min(1),avg(4),max(7) for(; i != len; ++i) { dst[i] = src[i]; } return; } #elif defined __GNUC__ && !defined __INTEL_COMPILER if ((reinterpret_cast<uintptr_t>(dst)& 0x1F) != 0ULL && (reinterpret_cast<uintptr_t>(src)& 0x1F) != 0ULL) { if(len <= MEMMOVE_1ELEM) { return; } else if(len <= MEMMOVE_16ELEMS) { const __m256 ymm0 = _mm256_loadu_ps(&src[0]); _mm256_storeu_ps(&dst[0],ymm0); const __m256 ymm1 = _mm256_loadu_ps(&src[1*YMM_LEN]); _mm256_storeu_ps(&dst[1*YMM_LEN],ymm1); return; } else if(len <= MEMMOVE_32ELEMS) { const __m256 ymm0 = _mm256_loadu_ps(&src[0]); _mm256_storeu_ps(&dst[0],ymm0); const __m256 ymm1 = _mm256_loadu_ps(&src[1*YMM_LEN]); _mm256_storeu_ps(&dst[1*YMM_LEN],ymm1); const __m256 ymm2 = _mm256_loadu_ps(&src[2*YMM_LEN]); _mm256_storeu_ps(&dst[2*YMM_LEN],ymm2); const _mm256 ymm3 = _mm256_loadu_ps(&src[3*YMM_LEN]); _mm256_storeu_ps(&dst[3*YMM_LEN],ymm3); 
return; } else if(len <= MEMMOVE_64ELEMS) { const __m256 ymm0 = _mm256_loadu_ps(&src[0]); _mm256_storeu_ps(&dst[0],ymm0); const __m256 ymm1 = _mm256_loadu_ps(&src[1*YMM_LEN]); _mm256_storeu_ps(&dst[1*YMM_LEN],ymm1); const __m256 ymm2 = _mm256_loadu_ps(&src[2*YMM_LEN]); _mm256_storeu_ps(&dst[2*YMM_LEN],ymm2); const _mm256 ymm3 = _mm256_loadu_ps(&src[3*YMM_LEN]); _mm256_storeu_ps(&dst[3*YMM_LEN],ymm3); const __m256 ymm4 = _mm256_loadu_ps(&src[4*YMM_LEN]); _mm256_storeu_ps(&dst[4*YMM_LEN],ymm4); const __m256 ymm5 = _mm256_loadu_ps(&src[5*YMM_LEN]); _mm256_storeu_ps(&dst[5*YMM_LEN],ymm5); const __m256 ymm6 = _mm256_loadu_ps(&src[6*YMM_LEN]); _mm256_storeu_ps(&dst[6*YMM_LEN],ymm6); const __m256 ymm7 = _mm256_loadu_ps(&src[7*YMM_LEN]); _mm256_storeu_ps(&dst[7*YMM_LEN],ymm7); return; } else if(len <= MEMMOVE_128ELEMS) { #if (GMS_MAN_PREFETCH) == 1 _mm_prefetch((const char*)&src[0], _MM_HINT_T0); _mm_prefetch((const char*)&src[2*YMM_LEN],_MM_HINT_T0); _mm_prefetch((const char*)&src[4*YMM_LEN],_MM_HINT_T0); _mm_prefetch((const char*)&src[6*YMM_LEN],_MM_HINT_T0); _mm_prefetch((const char*)&src[8*YMM_LEN],_MM_HINT_T0); #endif const __m256 ymm0 = _mm256_loadu_ps(&src[0]); _mm256_storeu_ps(&dst[0],ymm0); const __m256 ymm1 = _mm256_loadu_ps(&src[1*YMM_LEN]); _mm256_storeu_ps(&dst[1*YMM_LEN],ymm1); const __m256 ymm2 = _mm256_loadu_ps(&src[2*YMM_LEN]); _mm256_storeu_ps(&dst[2*YMM_LEN],ymm2); const _mm256 ymm3 = _mm256_loadu_ps(&src[3*YMM_LEN]); _mm256_storeu_ps(&dst[3*YMM_LEN],ymm3); const __m256 ymm4 = _mm256_loadu_ps(&src[4*YMM_LEN]); _mm256_storeu_ps(&dst[4*YMM_LEN],ymm4); const __m256 ymm5 = _mm256_loadu_ps(&src[5*YMM_LEN]); _mm256_storeu_ps(&dst[5*YMM_LEN],ymm5); const __m256 ymm6 = _mm256_loadu_ps(&src[6*YMM_LEN]); _mm256_storeu_ps(&dst[6*YMM_LEN],ymm6); const __m256 ymm7 = _mm256_loadu_ps(&src[7*YMM_LEN]); _mm256_storeu_ps(&dst[7*YMM_LEN],ymm7); const __m256 ymm8 = _mm256_loadu_ps(&src[8*YMM_LEN]); _mm256_storeu_ps(&dst[8*YMM_LEN],ymm8); const __m256 ymm9 = 
_mm256_loadu_ps(&src[9*YMM_LEN]); _mm256_storeu_ps(&dst[9*YMM_LEN],ymm9); const __m256 ymm10 = _mm256_loadu_ps(&src[10*YMM_LEN]); _mm256_storeu_ps(&dst[10*YMM_LEN],ymm10); const __m256 ymm11 = _mm256_loadu_ps(&src[11*YMM_LEN]); _mm256_storeu_ps(&dst[11*YMM_LEN],ymm11); const __m256 ymm12 = _mm256_loadu_ps(&src[12*YMM_LEN]); _mm256_storeu_ps(&dst[12*YMM_LEN],ymm12); const __m256 ymm13 = _mm256_loadu_ps(&src[13*YMM_LEN]); _mm256_storeu_ps(&dst[13*YMM_LEN],ymm13); const __m256 ymm14 = _mm256_loadu_ps(&src[14*YMM_LEN]); _mm256_storeu_ps(&dst[14*YMM_LEN],ymm14); const __m256 ymm15 = _mm256_loadu_ps(&src[15*YMM_LEN]); _mm256_storeu_ps(&dst[15*YMM_LEN],ymm155); return; } else if(len > MEMMOVE_128ELEMS) { int32_t i; for(i = 0; i != ROUND_TO_EIGHT(len,8); i += 64) { #if (GMS_MAN_PREFETCH) == 1 _mm_prefetch((const char*)&src[i+0], _MM_HINT_T0); _mm_prefetch((const char*)&src[i+2*YMM_LEN],_MM_HINT_T0); _mm_prefetch((const char*)&src[i+4*YMM_LEN],_MM_HINT_T0); #endif const __m256 ymm0 = _mm256_loadu_ps(&src[i+0]); _mm256_storeu_ps(&dst[i+0],ymm0); const __m256 ymm1 = _mm256_loadu_ps(&src[i+1*YMM_LEN]); _mm256_storeu_ps(&dst[i+1*YMM_LEN],ymm1); const __m256 ymm2 = _mm256_loadu_ps(&src[i+2*YMM_LEN]); _mm256_storeu_ps(&dst[i+2*YMM_LEN],ymm2); const _mm256 ymm3 = _mm256_loadu_ps(&src[i+3*YMM_LEN]); _mm256_storeu_ps(&dst[i+3*YMM_LEN],ymm3); const __m256 ymm4 = _mm256_loadu_ps(&src[i+4*YMM_LEN]); _mm256_storeu_ps(&dst[i+4*YMM_LEN],ymm4); const __m256 ymm5 = _mm256_loadu_ps(&src[i+5*YMM_LEN]); _mm256_storeu_ps(&dst[i+5*YMM_LEN],ymm5); const __m256 ymm6 = _mm256_loadu_ps(&src[i+6*YMM_LEN]); _mm256_storeu_ps(&dst[i+6*YMM_LEN],ymm6); const __m256 ymm7 = _mm256_loadu_ps(&src[i+7*YMM_LEN]); _mm256_storeu_ps(&dst[i+7*YMM_LEN],ymm7); } for(; i != len; ++i) { dst[i] = src[i]; } return; } } else { if(len <= MEMMOVE_1ELEM) { return; } else if(len <= MEMMOVE_16ELEMS) { const __m256 ymm0 = _mm256_load_ps(&src[0]); _mm256_store_ps(&dst[0],ymm0); const __m256 ymm1 = 
_mm256_load_ps(&src[1*YMM_LEN]); _mm256_store_ps(&dst[1*YMM_LEN],ymm1); return; } else if(len <= MEMMOVE_32ELEMS) { const __m256 ymm0 = _mm256_load_ps(&src[0]); _mm256_store_ps(&dst[0],ymm0); const __m256 ymm1 = _mm256_load_ps(&src[1*YMM_LEN]); _mm256_store_ps(&dst[1*YMM_LEN],ymm1); const __m256 ymm2 = _mm256_load_ps(&src[2*YMM_LEN]); _mm256_store_ps(&dst[2*YMM_LEN],ymm2); const _mm256 ymm3 = _mm256_load_ps(&src[3*YMM_LEN]); _mm256_store_ps(&dst[3*YMM_LEN],ymm3); return; } else if(len <= MEMMOVE_64ELEMS) { const __m256 ymm0 = _mm256_load_ps(&src[0]); _mm256_store_ps(&dst[0],ymm0); const __m256 ymm1 = _mm256_load_ps(&src[1*YMM_LEN]); _mm256_store_ps(&dst[1*YMM_LEN],ymm1); const __m256 ymm2 = _mm256_load_ps(&src[2*YMM_LEN]); _mm256_store_ps(&dst[2*YMM_LEN],ymm2); const _mm256 ymm3 = _mm256_load_ps(&src[3*YMM_LEN]); _mm256_store_ps(&dst[3*YMM_LEN],ymm3); const __m256 ymm4 = _mm256_load_ps(&src[4*YMM_LEN]); _mm256_store_ps(&dst[4*YMM_LEN],ymm4); const __m256 ymm5 = _mm256_load_ps(&src[5*YMM_LEN]); _mm256_store_ps(&dst[5*YMM_LEN],ymm5); const __m256 ymm6 = _mm256_load_ps(&src[6*YMM_LEN]); _mm256_store_ps(&dst[6*YMM_LEN],ymm6); const __m256 ymm7 = _mm256_load_ps(&src[7*YMM_LEN]); _mm256_store_ps(&dst[7*YMM_LEN],ymm7); return; } else if(len <= MEMMOVE_128ELEMS) { #if (GMS_MAN_PREFETCH) == 1 _mm_prefetch((const char*)&src[0], _MM_HINT_T0); _mm_prefetch((const char*)&src[2*YMM_LEN],_MM_HINT_T0); _mm_prefetch((const char*)&src[4*YMM_LEN],_MM_HINT_T0); _mm_prefetch((const char*)&src[6*YMM_LEN],_MM_HINT_T0); _mm_prefetch((const char*)&src[8*YMM_LEN],_MM_HINT_T0); #endif const __m256 ymm0 = _mm256_load_ps(&src[0]); _mm256_store_ps(&dst[0],ymm0); const __m256 ymm1 = _mm256_load_ps(&src[1*YMM_LEN]); _mm256_store_ps(&dst[1*YMM_LEN],ymm1); const __m256 ymm2 = _mm256_load_ps(&src[2*YMM_LEN]); _mm256_store_ps(&dst[2*YMM_LEN],ymm2); const _mm256 ymm3 = _mm256_load_ps(&src[3*YMM_LEN]); _mm256_store_ps(&dst[3*YMM_LEN],ymm3); const __m256 ymm4 = _mm256_load_ps(&src[4*YMM_LEN]); 
_mm256_store_ps(&dst[4*YMM_LEN],ymm4); const __m256 ymm5 = _mm256_load_ps(&src[5*YMM_LEN]); _mm256_store_ps(&dst[5*YMM_LEN],ymm5); const __m256 ymm6 = _mm256_load_ps(&src[6*YMM_LEN]); _mm256_store_ps(&dst[6*YMM_LEN],ymm6); const __m256 ymm7 = _mm256_load_ps(&src[7*YMM_LEN]); _mm256_store_ps(&dst[7*YMM_LEN],ymm7); const __m256 ymm8 = _mm256_load_ps(&src[8*YMM_LEN]); _mm256_store_ps(&dst[8*YMM_LEN],ymm8); const __m256 ymm9 = _mm256_load_ps(&src[9*YMM_LEN]); _mm256_store_ps(&dst[9*YMM_LEN],ymm9); const __m256 ymm10 = _mm256_load_ps(&src[10*YMM_LEN]); _mm256_store_ps(&dst[10*YMM_LEN],ymm10); const __m256 ymm11 = _mm256_load_ps(&src[11*YMM_LEN]); _mm256_store_ps(&dst[11*YMM_LEN],ymm11); const __m256 ymm12 = _mm256_load_ps(&src[12*YMM_LEN]); _mm256_store_ps(&dst[12*YMM_LEN],ymm12); const __m256 ymm13 = _mm256_load_ps(&src[13*YMM_LEN]); _mm256_store_ps(&dst[13*YMM_LEN],ymm13); const __m256 ymm14 = _mm256_load_ps(&src[14*YMM_LEN]); _mm256_store_ps(&dst[14*YMM_LEN],ymm14); const __m256 ymm15 = _mm256_load_ps(&src[15*YMM_LEN]); _mm256_store_ps(&dst[15*YMM_LEN],ymm155); return; } else if(len > MEMMOVE_128ELEMS) { int32_t i; src = (float*)__builtin_assume_aligned(src,32); dst = (const float*)__builtin_assume_aligned(dst,32); for(i = 0; i != ROUND_TO_EIGHT(len,8); i += 64) { #if (GMS_MAN_PREFETCH) == 1 _mm_prefetch((const char*)&src[i+0], _MM_HINT_T0); _mm_prefetch((const char*)&src[i+2*YMM_LEN],_MM_HINT_T0); _mm_prefetch((const char*)&src[i+4*YMM_LEN],_MM_HINT_T0); #endif const __m256 ymm0 = _mm256_load_ps(&src[i+0]); _mm256_store_ps(&dst[i+0],ymm0); const __m256 ymm1 = _mm256_load_ps(&src[i+1*YMM_LEN]); _mm256_store_ps(&dst[i+1*YMM_LEN],ymm1); const __m256 ymm2 = _mm256_load_ps(&src[i+2*YMM_LEN]); _mm256_store_ps(&dst[i+2*YMM_LEN],ymm2); const _mm256 ymm3 = _mm256_load_ps(&src[i+3*YMM_LEN]); _mm256_store_ps(&dst[i+3*YMM_LEN],ymm3); const __m256 ymm4 = _mm256_load_ps(&src[i+4*YMM_LEN]); _mm256_store_ps(&dst[i+4*YMM_LEN],ymm4); const __m256 ymm5 = 
_mm256_load_ps(&src[i+5*YMM_LEN]); _mm256_store_ps(&dst[i+5*YMM_LEN],ymm5); const __m256 ymm6 = _mm256_load_ps(&src[i+6*YMM_LEN]); _mm256_store_ps(&dst[i+6*YMM_LEN],ymm6); const __m256 ymm7 = _mm256_load_ps(&src[i+7*YMM_LEN]); _mm256_storeups(&dst[i+7*YMM_LEN],ymm7); } for(; i != len; ++i) { dst[i] = src[i]; } return; } #endif } #if defined __AVX512F__ __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx512_memcpy_ps( #if defined __ICC || defined __INTEL_COMPILER float * __restrict dst, const float * __restrict src, #elif defined __GNUC__ && !defined __INTEL_COMPILER float * __restrict __ATTR_ALIGN__(64) dst, const float * __restrict __ATTR_ALIGN__(64) src, #endif const int32_t len) { #endif #if defined __ICC || defined __INTEL_COMPILER if(len <= MEMMOVE_1ELEM) { return; } else if(len <= MEMMOVE_16ELEMS) { const __m512 zmm0 = _mm512_loadu_ps(&src[0]); _mm512_storeu_ps(&dst[0],zmm0); return; } else if(len <= MEMMOVE_32ELEMS) { const __m512 zmm0 = _mm512_loadu_ps(&src[0]); _mm512_storeu_ps(&dst[0],zmm0); const __m512 zmm1 = _mm512_loadu_ps(&src[1*ZMM_LEN]); _mm512_storeu_ps(&dst[1*ZMM_LEN],zmm1); return; } else if(len <= MEMMOVE_64ELEMS) { const __m512 zmm0 = _mm512_loadu_ps(&src[0]); _mm512_storeu_ps(&dst[0],zmm0); const __m512 zmm1 = _mm512_loadu_ps(&src[1*ZMM_LEN]); _mm512_storeu_ps(&dst[1*ZMM_LEN],zmm1); const __m512 zmm2 = _mm512_loadu_ps(&src[2*ZMM_LEN]); _mm512_storeu_ps(&dst[2*ZMM_LEN],zmm2); const __m512 zmm3 = _mm512_loadu_ps(&src[3*ZMM_LEN]); _mm512_storeu_ps(&dst[3*ZMM_LEN],zmm3); return; } else if(len <= MEMMOVE_128ELEMS) { const __m512 zmm0 = _mm512_loadu_ps(&src[0]); _mm512_storeu_ps(&dst[0],zmm0); const __m512 zmm1 = _mm512_loadu_ps(&src[1*ZMM_LEN]); _mm512_storeu_ps(&dst[1*ZMM_LEN],zmm1); const __m512 zmm2 = _mm512_loadu_ps(&src[2*ZMM_LEN]); _mm512_storeu_ps(&dst[2*ZMM_LEN],zmm2); const __m512 zmm3 = _mm512_loadu_ps(&src[3*ZMM_LEN]); _mm512_storeu_ps(&dst[3*ZMM_LEN],zmm3); const __m512 zmm4 = _mm512_loadu_ps(&src[4*ZMM_LEN]); 
_mm512_storeu_ps(&dst[4*ZMM_LEN],zmm4); const __m512 zmm5 = _mm512_loadu_ps(&src[5*ZMM_LEN]); _mm512_storeu_ps(&dst[5*ZMM_LEN],zmm5); const __m512 zmm6 = _mm512_loadu_ps(&src[6*ZMM_LEN]); _mm512_storeu_ps(&dst[6*ZMM_LEN],zmm6); const __m512 zmm7 = _m512_loadu_ps(&src[7*ZMM_LEN]); _mm512_storeu_ps(&dst[7*ZMM_LEN],zmm7); return; } else if(len <= MEMMOVE_256ELEMS) { #if (GMS_MAN_PREFETCH) == 1 _mm_prefetch((const char *)&src[0], _MM_HINT_T0); _mm_prefetch((const char *)&src[1*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[2*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[3*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[4*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[5*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[6*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[7*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[8*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[9*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[10*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[11*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[12*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[13*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[14*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[15*ZMM_LEN], _MM_HINT_T0); #endif const __m512 zmm0 = _mm512_loadu_ps(&src[0]); _mm512_storeu_ps(&dst[0],zmm0); const __m512 zmm1 = _mm512_loadu_ps(&src[1*ZMM_LEN]); _mm512_storeu_ps(&dst[1*ZMM_LEN],zmm1); const __m512 zmm2 = _mm512_loadu_ps(&src[2*ZMM_LEN]); _mm512_storeu_ps(&dst[2*ZMM_LEN],zmm2); const __m512 zmm3 = _mm512_loadu_ps(&src[3*ZMM_LEN]); _mm512_storeu_ps(&dst[3*ZMM_LEN],zmm3); const __m512 zmm4 = _mm512_loadu_ps(&src[4*ZMM_LEN]); _mm512_storeu_ps(&dst[4*ZMM_LEN],zmm4); const __m512 zmm5 = _mm512_loadu_ps(&src[5*ZMM_LEN]); _mm512_storeu_ps(&dst[5*ZMM_LEN],zmm5); const __m512 zmm6 = _mm512_loadu_ps(&src[6*ZMM_LEN]); _mm512_storeu_ps(&dst[6*ZMM_LEN],zmm6); const __m512 zmm7 = 
_mm512_loadu_ps(&src[7*ZMM_LEN]); _mm512_storeu_ps(&dst[7*ZMM_LEN],zmm7); const __m512 zmm8 = _mm512_loadu_ps(&src[8*ZMM_LEN]); _mm512_storeu_ps(&dst[8*ZMM_LEN],zmm8); const __m512 zmm9 = _mm512_loadu_ps(&src[9*ZMM_LEN]); _mm512_storeu_ps(&dst[9*ZMM_LEN],zmm9); const __m512 zmm10 = _mm512_loadu_ps(&src[10*ZMM_LEN]); _mm512_storeu_ps(&dst[10*ZMM_LEN],zmm10); const __m512 zmm11 = _mm512_loadu_ps(&src[11*ZMM_LEN]); _mm512_storeu_ps(&dst[11*ZMM_LEN],zmm11); const __m512 zmm12 = _mm512_loadu_ps(&src[12*ZMM_LEN]); _mm512_storeu_ps(&dst[12*ZMM_LEN],zmm12); const __m512 zmm13 = _mm512_loadu_ps(&src[13*ZMM_LEN]); _mm512_storeu_ps(&dst[13*ZMM_LEN],zmm13); const __m512 zmm14 = _mm512_loadu_ps(&src[14*ZMM_LEN]); _mm512_storeu_ps(&dst[14*ZMM_LEN],zmm14); const __m512 zmm15 = _mm512_loadu_ps(&src[15*ZMM_LEN]); _mm512_storeu_ps(&dst[15*ZMM_LEN],zmm15); return; } else if(len > MEMMOVE_256ELEMS) { int32_t i; #pragma code_align(32) for(i = 0; i != ROUND_TO_SIXTEEN(len,16); i += 128) { #if (GMS_MAN_PREFETCH) == 1 _mm_prefetch((const char *)&src[i+0], _MM_HINT_T0); _mm_prefetch((const char *)&src[i+1*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[i+2*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[i+3*ZMM_LEN], _MM_HINT_T0); #endif const __m512 zmm0 = _mm512_loadu_ps(&src[i+0]); _mm512_storeu_ps(&dst[0],zmm0); const __m512 zmm1 = _mm512_loadu_ps(&src[i+1*ZMM_LEN]); _mm512_storeu_ps(&dst[1*ZMM_LEN],zmm1); const __m512 zmm2 = _mm512_loadu_ps(&src[i+2*ZMM_LEN]); _mm512_storeu_ps(&dst[2*ZMM_LEN],zmm2); const __m512 zmm3 = _mm512_loadu_ps(&src[i+3*ZMM_LEN]); _mm512_storeu_ps(&dst[3*ZMM_LEN],zmm3); const __m512 zmm4 = _mm512_loadu_ps(&src[i+4*ZMM_LEN]); _mm512_storeu_ps(&dst[4*ZMM_LEN],zmm4); const __m512 zmm5 = _mm512_loadu_ps(&src[i+5*ZMM_LEN]); _mm512_storeu_ps(&dst[5*ZMM_LEN],zmm5); const __m512 zmm6 = _mm512_loadu_ps(&src[i+6*ZMM_LEN]); _mm512_storeu_ps(&dst[6*ZMM_LEN],zmm6); const __m512 zmm7 = _mm512_loadu_ps(&src[i+7*ZMM_LEN]); 
_mm512_storeu_ps(&dst[7*ZMM_LEN],zmm7); } #pragma loop_count min(1),avg(8),max(15) for(; i != len; ++i) { dst[i] = src[i]; } return; } #elif defined __GNUC__ && !defined __INTEL_COMPILER if ((reinterpret_cast<uintptr_t>(dst)& 0x3F) != 0ULL && (reinterpret_cast<uintptr_t>(src)& 0x3F) != 0ULL) { if(len <= MEMMOVE_1ELEM) { return; } else if(len <= MEMMOVE_16ELEMS) { const __m512 zmm0 = _mm512_loadu_ps(&src[0]); _mm512_storeu_ps(&dst[0],zmm0); return; } else if(len <= MEMMOVE_32ELEMS) { const __m512 zmm0 = _mm512_loadu_ps(&src[0]); _mm512_storeu_ps(&dst[0],zmm0); const __m512 zmm1 = _mm512_loadu_ps(&src[1*ZMM_LEN]); _mm512_storeu_ps(&dst[1*ZMM_LEN],zmm1); return; } else if(len <= MEMMOVE_64ELEMS) { const __m512 zmm0 = _mm512_loadu_ps(&src[0]); _mm512_storeu_ps(&dst[0],zmm0); const __m512 zmm1 = _mm512_loadu_ps(&src[1*ZMM_LEN]); _mm512_storeu_ps(&dst[1*ZMM_LEN],zmm1); const __m512 zmm2 = _mm512_loadu_ps(&src[2*ZMM_LEN]); _mm512_storeu_ps(&dst[2*ZMM_LEN],zmm2); const __m512 zmm3 = _mm512_loadu_ps(&src[3*ZMM_LEN]); _mm512_storeu_ps(&dst[3*ZMM_LEN],zmm3); return; } else if(len <= MEMMOVE_128ELEMS) { const __m512 zmm0 = _mm512_loadu_ps(&src[0]); _mm512_storeu_ps(&dst[0],zmm0); const __m512 zmm1 = _mm512_loadu_ps(&src[1*ZMM_LEN]); _mm512_storeu_ps(&dst[1*ZMM_LEN],zmm1); const __m512 zmm2 = _mm512_loadu_ps(&src[2*ZMM_LEN]); _mm512_storeu_ps(&dst[2*ZMM_LEN],zmm2); const __m512 zmm3 = _mm512_loadu_ps(&src[3*ZMM_LEN]); _mm512_storeu_ps(&dst[3*ZMM_LEN],zmm3); const __m512 zmm4 = _mm512_loadu_ps(&src[4*ZMM_LEN]); _mm512_storeu_ps(&dst[4*ZMM_LEN],zmm4); const __m512 zmm5 = _mm512_loadu_ps(&src[5*ZMM_LEN]); _mm512_storeu_ps(&dst[5*ZMM_LEN],zmm5); const __m512 zmm6 = _mm512_loadu_ps(&src[6*ZMM_LEN]); _mm512_storeu_ps(&dst[6*ZMM_LEN],zmm6); const __m512 zmm7 = _m512_loadu_ps(&src[7*ZMM_LEN]); _mm512_storeu_ps(&dst[7*ZMM_LEN],zmm7); return; } else if(len <= MEMMOVE_256ELEMS) { #if (GMS_MAN_PREFETCH) == 1 _mm_prefetch((const char *)&src[0], _MM_HINT_T0); _mm_prefetch((const char 
*)&src[1*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[2*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[3*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[4*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[5*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[6*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[7*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[8*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[9*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[10*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[11*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[12*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[13*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[14*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[15*ZMM_LEN], _MM_HINT_T0); #endif const __m512 zmm0 = _mm512_loadu_ps(&src[0]); _mm512_storeu_ps(&dst[0],zmm0); const __m512 zmm1 = _mm512_loadu_ps(&src[1*ZMM_LEN]); _mm512_storeu_ps(&dst[1*ZMM_LEN],zmm1); const __m512 zmm2 = _mm512_loadu_ps(&src[2*ZMM_LEN]); _mm512_storeu_ps(&dst[2*ZMM_LEN],zmm2); const __m512 zmm3 = _mm512_loadu_ps(&src[3*ZMM_LEN]); _mm512_storeu_ps(&dst[3*ZMM_LEN],zmm3); const __m512 zmm4 = _mm512_loadu_ps(&src[4*ZMM_LEN]); _mm512_storeu_ps(&dst[4*ZMM_LEN],zmm4); const __m512 zmm5 = _mm512_loadu_ps(&src[5*ZMM_LEN]); _mm512_storeu_ps(&dst[5*ZMM_LEN],zmm5); const __m512 zmm6 = _mm512_loadu_ps(&src[6*ZMM_LEN]); _mm512_storeu_ps(&dst[6*ZMM_LEN],zmm6); const __m512 zmm7 = _mm512_loadu_ps(&src[7*ZMM_LEN]); _mm512_storeu_ps(&dst[7*ZMM_LEN],zmm7); const __m512 zmm8 = _mm512_loadu_ps(&src[8*ZMM_LEN]); _mm512_storeu_ps(&dst[8*ZMM_LEN],zmm8); const __m512 zmm9 = _mm512_loadu_ps(&src[9*ZMM_LEN]); _mm512_storeu_ps(&dst[9*ZMM_LEN],zmm9); const __m512 zmm10 = _mm512_loadu_ps(&src[10*ZMM_LEN]); _mm512_storeu_ps(&dst[10*ZMM_LEN],zmm10); const __m512 zmm11 = _mm512_loadu_ps(&src[11*ZMM_LEN]); _mm512_storeu_ps(&dst[11*ZMM_LEN],zmm11); const __m512 zmm12 = 
_mm512_loadu_ps(&src[12*ZMM_LEN]); _mm512_storeu_ps(&dst[12*ZMM_LEN],zmm12); const __m512 zmm13 = _mm512_loadu_ps(&src[13*ZMM_LEN]); _mm512_storeu_ps(&dst[13*ZMM_LEN],zmm13); const __m512 zmm14 = _mm512_loadu_ps(&src[14*ZMM_LEN]); _mm512_storeu_ps(&dst[14*ZMM_LEN],zmm14); const __m512 zmm15 = _mm512_loadu_ps(&src[15*ZMM_LEN]); _mm512_storeu_ps(&dst[15*ZMM_LEN],zmm15); return; } else if(len > MEMMOVE_256ELEMS) { int32_t i; for(i = 0; i != ROUND_TO_SIXTEEN(len,16); i += 128) { #if (GMS_MAN_PREFETCH) == 1 _mm_prefetch((const char *)&src[i+0], _MM_HINT_T0); _mm_prefetch((const char *)&src[i+1*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[i+2*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[i+3*ZMM_LEN], _MM_HINT_T0); #endif const __m512 zmm0 = _mm512_loadu_ps(&src[i+0]); _mm512_storeu_ps(&dst[0],zmm0); const __m512 zmm1 = _mm512_loadu_ps(&src[i+1*ZMM_LEN]); _mm512_storeu_ps(&dst[1*ZMM_LEN],zmm1); const __m512 zmm2 = _mm512_loadu_ps(&src[i+2*ZMM_LEN]); _mm512_storeu_ps(&dst[2*ZMM_LEN],zmm2); const __m512 zmm3 = _mm512_loadu_ps(&src[i+3*ZMM_LEN]); _mm512_storeu_ps(&dst[3*ZMM_LEN],zmm3); const __m512 zmm4 = _mm512_loadu_ps(&src[i+4*ZMM_LEN]); _mm512_storeu_ps(&dst[4*ZMM_LEN],zmm4); const __m512 zmm5 = _mm512_loadu_ps(&src[i+5*ZMM_LEN]); _mm512_storeu_ps(&dst[5*ZMM_LEN],zmm5); const __m512 zmm6 = _mm512_loadu_ps(&src[i+6*ZMM_LEN]); _mm512_storeu_ps(&dst[6*ZMM_LEN],zmm6); const __m512 zmm7 = _mm512_loadu_ps(&src[i+7*ZMM_LEN]); _mm512_storeu_ps(&dst[7*ZMM_LEN],zmm7); } for(; i != len; ++i) { dst[i] = src[i]; } return; } else { if(len <= MEMMOVE_1ELEM) { return; } else if(len <= MEMMOVE_16ELEMS) { const __m512 zmm0 = _mm512_load_ps(&src[0]); _mm512_store_ps(&dst[0],zmm0); return; } else if(len <= MEMMOVE_32ELEMS) { const __m512 zmm0 = _mm512_load_ps(&src[0]); _mm512_store_ps(&dst[0],zmm0); const __m512 zmm1 = _mm512_load_ps(&src[1*ZMM_LEN]); _mm512_store_ps(&dst[1*ZMM_LEN],zmm1); return; } else if(len <= MEMMOVE_64ELEMS) { const __m512 zmm0 = 
_mm512_load_ps(&src[0]); _mm512_store_ps(&dst[0],zmm0); const __m512 zmm1 = _mm512_load_ps(&src[1*ZMM_LEN]); _mm512_store_ps(&dst[1*ZMM_LEN],zmm1); const __m512 zmm2 = _mm512_load_ps(&src[2*ZMM_LEN]); _mm512_store_ps(&dst[2*ZMM_LEN],zmm2); const __m512 zmm3 = _mm512_load_ps(&src[3*ZMM_LEN]); _mm512_store_ps(&dst[3*ZMM_LEN],zmm3); return; } else if(len <= MEMMOVE_128ELEMS) { const __m512 zmm0 = _mm512_load_ps(&src[0]); _mm512_store_ps(&dst[0],zmm0); const __m512 zmm1 = _mm512_load_ps(&src[1*ZMM_LEN]); _mm512_store_ps(&dst[1*ZMM_LEN],zmm1); const __m512 zmm2 = _mm512_load_ps(&src[2*ZMM_LEN]); _mm512_store_ps(&dst[2*ZMM_LEN],zmm2); const __m512 zmm3 = _mm512_load_ps(&src[3*ZMM_LEN]); _mm512_store_ps(&dst[3*ZMM_LEN],zmm3); const __m512 zmm4 = _mm512_load_ps(&src[4*ZMM_LEN]); _mm512_store_ps(&dst[4*ZMM_LEN],zmm4); const __m512 zmm5 = _mm512_load_ps(&src[5*ZMM_LEN]); _mm512_store_ps(&dst[5*ZMM_LEN],zmm5); const __m512 zmm6 = _mm512_load_ps(&src[6*ZMM_LEN]); _mm512_store_ps(&dst[6*ZMM_LEN],zmm6); const __m512 zmm7 = _m512_load_ps(&src[7*ZMM_LEN]); _mm512_store_ps(&dst[7*ZMM_LEN],zmm7); return; } else if(len <= MEMMOVE_256ELEMS) { #if (GMS_MAN_PREFETCH) == 1 _mm_prefetch((const char *)&src[0], _MM_HINT_T0); _mm_prefetch((const char *)&src[1*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[2*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[3*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[4*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[5*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[6*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[7*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[8*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[9*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[10*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[11*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[12*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[13*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const 
char *)&src[14*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[15*ZMM_LEN], _MM_HINT_T0); #endif const __m512 zmm0 = _mm512_load_ps(&src[0]); _mm512_store_ps(&dst[0],zmm0); const __m512 zmm1 = _mm512_load_ps(&src[1*ZMM_LEN]); _mm512_store_ps(&dst[1*ZMM_LEN],zmm1); const __m512 zmm2 = _mm512_load_ps(&src[2*ZMM_LEN]); _mm512_store_ps(&dst[2*ZMM_LEN],zmm2); const __m512 zmm3 = _mm512_load_ps(&src[3*ZMM_LEN]); _mm512_store_ps(&dst[3*ZMM_LEN],zmm3); const __m512 zmm4 = _mm512_load_ps(&src[4*ZMM_LEN]); _mm512_store_ps(&dst[4*ZMM_LEN],zmm4); const __m512 zmm5 = _mm512_load_ps(&src[5*ZMM_LEN]); _mm512_store_ps(&dst[5*ZMM_LEN],zmm5); const __m512 zmm6 = _mm512_load_ps(&src[6*ZMM_LEN]); _mm512_store_ps(&dst[6*ZMM_LEN],zmm6); const __m512 zmm7 = _mm512_load_ps(&src[7*ZMM_LEN]); _mm512_store_ps(&dst[7*ZMM_LEN],zmm7); const __m512 zmm8 = _mm512_load_ps(&src[8*ZMM_LEN]); _mm512_store_ps(&dst[8*ZMM_LEN],zmm8); const __m512 zmm9 = _mm512_load_ps(&src[9*ZMM_LEN]); _mm512_store_ps(&dst[9*ZMM_LEN],zmm9); const __m512 zmm10 = _mm512_load_ps(&src[10*ZMM_LEN]); _mm512_store_ps(&dst[10*ZMM_LEN],zmm10); const __m512 zmm11 = _mm512_load_ps(&src[11*ZMM_LEN]); _mm512_store_ps(&dst[11*ZMM_LEN],zmm11); const __m512 zmm12 = _mm512_load_ps(&src[12*ZMM_LEN]); _mm512_store_ps(&dst[12*ZMM_LEN],zmm12); const __m512 zmm13 = _mm512_load_ps(&src[13*ZMM_LEN]); _mm512_store_ps(&dst[13*ZMM_LEN],zmm13); const __m512 zmm14 = _mm512_load_ps(&src[14*ZMM_LEN]); _mm512_store_ps(&dst[14*ZMM_LEN],zmm14); const __m512 zmm15 = _mm512_load_ps(&src[15*ZMM_LEN]); _mm512_store_ps(&dst[15*ZMM_LEN],zmm15); return; } else if(len > MEMMOVE_256ELEMS) { int32_t i; src = (float*)__builtin_assume_aligned(src,64); dst = (const float*)__builtin_assume_aligned(dst,64); for(i = 0; i != ROUND_TO_SIXTEEN(len,16); i += 128) { #if (GMS_MAN_PREFETCH) == 1 _mm_prefetch((const char *)&src[i+0], _MM_HINT_T0); _mm_prefetch((const char *)&src[i+1*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[i+2*ZMM_LEN], _MM_HINT_T0); 
_mm_prefetch((const char *)&src[i+3*ZMM_LEN], _MM_HINT_T0); #endif const __m512 zmm0 = _mm512_load_ps(&src[i+0]); _mm512_store_ps(&dst[0],zmm0); const __m512 zmm1 = _mm512_load_ps(&src[i+1*ZMM_LEN]); _mm512_store_ps(&dst[1*ZMM_LEN],zmm1); const __m512 zmm2 = _mm512_load_ps(&src[i+2*ZMM_LEN]); _mm512_store_ps(&dst[2*ZMM_LEN],zmm2); const __m512 zmm3 = _mm512_load_ps(&src[i+3*ZMM_LEN]); _mm512_store_ps(&dst[3*ZMM_LEN],zmm3); const __m512 zmm4 = _mm512_load_ps(&src[i+4*ZMM_LEN]); _mm512_store_ps(&dst[4*ZMM_LEN],zmm4); const __m512 zmm5 = _mm512_load_ps(&src[i+5*ZMM_LEN]); _mm512_store_ps(&dst[5*ZMM_LEN],zmm5); const __m512 zmm6 = _mm512_load_ps(&src[i+6*ZMM_LEN]); _mm512_store_ps(&dst[6*ZMM_LEN],zmm6); const __m512 zmm7 = _mm512_load_ps(&src[i+7*ZMM_LEN]); _mm512_store_ps(&dst[7*ZMM_LEN],zmm7); } for(; i != len; ++i) { dst[i] = src[i]; } return; } #endif } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx256_cached_memmove(void * __restrict _Dst, const void * __restrict _Src, const int32_t _nelems) { if (MEMMOVE_1ELEM <= _nelems) { return;} char * __restrict dst = (char *)_Dst; const char * __restrict src = (const char *)_Src; if ( _nelems <= MEMMOVE_16ELEMS) { const __m256 ymm0(_mm256_loadu_ps((float*)&src[0])); _mm256_storeu_ps((float*)&dst[0], ymm0); const __m256 ymm1(_mm256_loadu_ps((float*)&src[1*YMM_LEN])); _mm256_storeu_ps((float*)&dst[1*YMM_LEN], ymm1); return; } else if ( _nelems <= MEMMOVE_32ELEMS) { const __m256 ymm0(_mm256_loadu_ps((float*)&src[0])); _mm256_storeu_ps((float*)&dst[0], ymm0); const __m256 ymm1(_mm256_loadu_ps((float*)&src[1*YMM_LEN])); _mm256_storeu_ps((float*)&dst[1*YMM_LEN],ymm1); const __m256 ymm2(_mm256_loadu_ps((float*)&src[2*YMM_LEN])); _mm256_storeu_ps((float*)&dst[2*YMM_LEN],ymm2); const __m256 ymm3(_mm256_loadu_ps((float*)&src[3*YMM_LEN])); _mm256_storeu_ps((float*)&dst[3*YMM_LEN],ymm3); return; } else if ( _nelems <= MEMMOVE_64ELEMS){ const __m256 ymm0(_mm256_loadu_ps((float*)&src[0])); 
_mm256_storeu_ps((float*)&dst[0], ymm0); const __m256 ymm1(_mm256_loadu_ps((float*)&src[1*YMM_LEN])); _mm256_storeu_ps((float*)&dst[1*YMM_LEN], ymm1); const __m256 ymm2(_mm256_loadu_ps((float*)&src[2*YMM_LEN])); _mm256_storeu_ps((float*)&dst[2*YMM_LEN], ymm2); const __m256 ymm3(_mm256_loadu_ps((float*)&src[3*YMM_LEN])); _mm256_storeu_ps((float*)&dst[3*YMM_LEN], ymm3); const __m256 ymm4(_mm256_loadu_ps((float*)&src[4*YMM_LEN])); _mm256_storeu_ps((float*)&dst[4*YMM_LEN], ymm4); const __m256 ymm5(_mm256_loadu_ps((float*)&src[5*YMM_LEN])); _mm256_storeu_ps((float*)&dst[5*YMM_LEN], ymm5); const __m256 ymm6(_mm256_loadu_ps((float*)&src[6*YMM_LEN])); _mm256_storeu_ps((float*)&dst[6*YMM_LEN], ymm6); const __m256 ymm7(_mm256_loadu_ps((float*)&src[7*YMM_LEN])); _mm256_storeu_ps((float*)&dst[7*YMM_LEN], ymm7); return; } else if ( _nelems <= MEMMOVE_128ELEMS) { #if (GMS_MAN_PREFETCH) == 1 _mm_prefetch((const char*)&src[0], _MM_HINT_T0); _mm_prefetch((const char*)&src[2*YMM_LEN],_MM_HINT_T0); _mm_prefetch((const char*)&src[4*YMM_LEN],_MM_HINT_T0); _mm_prefetch((const char*)&src[6*YMM_LEN],_MM_HINT_T0); _mm_prefetch((const char*)&src[8*YMM_LEN],_MM_HINT_T0); #endif const __m256 ymm0(_mm256_loadu_ps((float*)&src[0])); _mm256_storeu_ps((float*)&dst[0], ymm0); const __m256 ymm1(_mm256_loadu_ps((float*)&src[1*YMM_LEN])); _mm256_storeu_ps((float*)&dst[1*YMM_LEN], ymm1); const __m256 ymm2(_mm256_loadu_ps((float*)&src[2*YMM_LEN])); _mm256_storeu_ps((float*)&dst[2*YMM_LEN], ymm2); const __m256 ymm3(_mm256_loadu_ps((float*)&src[3*YMM_LEN])); _mm256_storeu_ps((float*)&dst[3*YMM_LEN], ymm3); const __m256 ymm4(_mm256_loadu_ps((float*)&src[4*YMM_LEN])); _mm256_storeu_ps((float*)&dst[4*YMM_LEN], ymm4); const __m256 ymm5(_mm256_loadu_ps((float*)&src[5*YMM_LEN])); _mm256_storeu_ps((float*)&dst[5*YMM_LEN], ymm5); const __m256 ymm6(_mm256_loadu_ps((float*)&src[6*YMM_LEN])); _mm256_storeu_ps((float*)&dst[6*YMM_LEN], ymm6); const __m256 ymm7(_mm256_loadu_ps((float*)&src[7*YMM_LEN])); 
_mm256_storeu_ps((float*)&dst[7*YMM_LEN], ymm7); const __m256 ymm8(_mm256_loadu_ps((float*)&src[8*YMM_LEN])); _mm256_storeu_ps((float*)&dst[8*YMM_LEN], ymm8); const __m256 ymm9(_mm256_loadu_ps((float*)&src[9*YMM_LEN])); _mm256_storeu_ps((float*)&dst[9*YMM_LEN], ymm9); const __m256 ymm10(_mm256_loadu_ps((float*)&src[10*YMM_LEN])); _mm256_storeu_ps((float*)&dst[10*YMM_LEN],ymm10); const __m256 ymm11(_mm256_loadu_ps((float*)&src[11*YMM_LEN])); _mm256_storeu_ps((float*)&dst[11*YMM_LEN],ymm11); const __m256 ymm12(_mm256_loadu_ps((float*)&src[12*YMM_LEN])); _mm256_storeu_ps((float*)&dst[12*YMM_LEN],ymm12); const __m256 ymm13(_mm256_loadu_ps((float*)&src[13*YMM_LEN])); _mm256_storeu_ps((float*)&dst[13*YMM_LEN],ymm13); const __m256 ymm14(_mm256_loadu_ps((float*)&src[14*YMM_LEN])); _mm256_storeu_ps((float*)&dst[14*YMM_LEN],ymm14); const __m256 ymm15(_mm256_loadu_ps((float*)&src[15*YMM_LEN])); _mm256_storeu_ps((float*)&dst[15*YMM_LEN],ymm15); return; } else if (_nelems <= MAXFLOATSPERPAGE4KiB){ int32_t i; #if defined __ICC || defined __INTEL_COMPILER #pragma loop_count(1024) #endif for (i = 0; i != ROUND_TO_EIGHT(_nelems, 8); i += 64) { #if (GMS_MAN_PREFETCH) == 1 _mm_prefetch((const char*)&src[i], _MM_HINT_T0); #endif const __m256 ymm0(_mm256_loadu_ps((float*)&src[i+0])); _mm256_storeu_ps((float*)&dst[i+0], ymm0); const __m256 ymm1(_mm256_loadu_ps((float*)&src[i+1*YMM_LEN])); _mm256_storeu_ps((float*)&dst[i+1*YMM_LEN], ymm1); const __m256 ymm2(_mm256_loadu_ps((float*)&src[i+2*YMM_LEN])); _mm256_storeu_ps((float*)&dst[i+2*YMM_LEN], ymm2); const __m256 ymm3(_mm256_loadu_ps((float*)&src[i+3*YMM_LEN])); _mm256_storeu_ps((float*)&dst[i+3*YMM_LEN], ymm3); const __m256 ymm4(_mm256_loadu_ps((float*)&src[i+4*YMM_LEN])); _mm256_storeu_ps((float*)&dst[i+4*YMM_LEN], ymm4); const __m256 ymm5(_mm256_loadu_ps((float*)&src[i+5*YMM_LEN])); _mm256_storeu_ps((float*)&dst[i+5*YMM_LEN], ymm5); const __m256 ymm6(_mm256_loadu_ps((float*)&src[i+6*YMM_LEN])); 
_mm256_storeu_ps((float*)&dst[i+6*YMM_LEN], ymm6); const __m256 ymm7(_mm256_loadu_ps((float*)&src[i+7*YMM_LEN])); _mm256_storeu_ps((float*)&dst[i+7*YMM_LEN], ymm7); } #if defined __ICC || defined __INTEL_COMPILER #pragma loop_count min(1),avg(4),max(7) #endif for (; i != _nelems; ++i) { dst[i] = src[i]; } return; } else if (_nelems > MAXFLOATSPERPAGE4KiB) { int32_t j; for (int32_t k = 0; k != _nelems; k += MAXFLOATSPERPAGE4KiB) { volatile float t = src[k + MAXFLOATSPERPAGE4KiB]; for (j = k + 128; j != k + MAXFLOATSPERPAGE4KiB; j += 64) { _mm_prefetch((const char*)&src[j], _MM_HINT_T0); } for (j = k; j != ROUND_TO_EIGHT(k + MAXFLOATSPERPAGE4KiB, 8); j += 64) { const __m256 ymm0(_mm256_loadu_ps((float*)&src[j+0])); _mm256_storeu_ps((float*)&dst[j+0], ymm0); const __m256 ymm1(_mm256_loadu_ps((float*)&src[j+1*YMM_LEN])); _mm256_storeu_ps((float*)&dst[j+1*YMM_LEN], ymm1); const __m256 ymm2(_mm256_loadu_ps((float*)&src[j+2*YMM_LEN])); _mm256_storeu_ps((float*)&dst[j+2*YMM_LEN], ymm2); const __m256 ymm3(_mm256_loadu_ps((float*)&src[j+3*YMM_LEN])); _mm256_storeu_ps((float*)&dst[j+3*YMM_LEN], ymm3); const __m256 ymm4(_mm256_loadu_ps((float*)&src[j+4*YMM_LEN])); _mm256_storeu_ps((float*)&dst[j+4*YMM_LEN], ymm4); const __m256 ymm5(_mm256_loadu_ps((float*)&src[j+5*YMM_LEN])); _mm256_storeu_ps((float*)&dst[j+5*YMM_LEN], ymm5); const __m256 ymm6(_mm256_loadu_ps((float*)&src[j+6*YMM_LEN])); _mm256_storeu_ps((float*)&dst[j+6*YMM_LEN], ymm6); const __m256 ymm7(_mm256_loadu_ps((float*)&src[j+7*YMM_LEN])); _mm256_storeu_ps((float*)&dst[j+7*YMM_LEN], ymm7); } for (; j != _nelems; ++j) { dst[j] = src[j]; } } return; } } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx256_uncached_memmove(void * __restrict _Dst, const void * __restrict _Src, const int32_t _nelems) { if (_nelems <= MEMMOVE_1ELEM) { return;} char * __restrict dst = (char*)_Dst; const char * __restrict src = (const char*)_Src; uintptr_t dst_len = (uintptr_t)dst; int32_t _nbytes = 4*_nelems; 
int32_t misalign = 0; if (dst_len & 0x1F) { misalign = min_val(0x20 - (dst_len & 0x1F),_nbytes); dst += misalign; dst_len += misalign; _nbytes -= misalign; } if (_nelems <= MEMMOVE_16ELEMS) { const __m256 ymm0(_mm256_loadu_ps((float*)&src[0])); _mm256_stream_ps((float*)&dst[0], ymm0); const __m256 ymm1(_mm256_loadu_ps((float*)&src[1 * YMM_LEN])); _mm256_stream_ps((float*)&dst[1 * YMM_LEN], ymm1); _mm_sfence(); return; } else if (_nelems <= MEMMOVE_32ELEMS) { const __m256 ymm0(_mm256_loadu_ps((float*)&src[0])); _mm256_stream_ps((float*)&dst[0], ymm0); const __m256 ymm1(_mm256_loadu_ps((float*)&src[1 * YMM_LEN])); _mm256_stream_ps((float*)&dst[1 * YMM_LEN], ymm1); const __m256 ymm2(_mm256_loadu_ps((float*)&src[2 * YMM_LEN])); _mm256_stream_ps((float*)&dst[2 * YMM_LEN], ymm2); const __m256 ymm3(_mm256_loadu_ps((float*)&src[3 * YMM_LEN])); _mm256_stream_ps((float*)&dst[3 * YMM_LEN], ymm3); _mm_sfence(); return; } else if (_nelems <= MEMMOVE_64ELEMS){ const __m256 ymm0(_mm256_loadu_ps((float*)&src[0])); _mm256_stream_ps((float*)&dst[0], ymm0); const __m256 ymm1(_mm256_loadu_ps((float*)&src[1 * YMM_LEN])); _mm256_stream_ps((float*)&dst[1 * YMM_LEN], ymm1); const __m256 ymm2(_mm256_loadu_ps((float*)&src[2 * YMM_LEN])); _mm256_stream_ps((float*)&dst[2 * YMM_LEN], ymm2); const __m256 ymm3(_mm256_loadu_ps((float*)&src[3 * YMM_LEN])); _mm256_stream_ps((float*)&dst[3 * YMM_LEN], ymm3); const __m256 ymm4(_mm256_loadu_ps((float*)&src[4 * YMM_LEN])); _mm256_stream_ps((float*)&dst[4 * YMM_LEN], ymm4); const __m256 ymm5(_mm256_loadu_ps((float*)&src[5 * YMM_LEN])); _mm256_stream_ps((float*)&dst[5 * YMM_LEN], ymm5); const __m256 ymm6(_mm256_loadu_ps((float*)&src[6 * YMM_LEN])); _mm256_stream_ps((float*)&dst[6 * YMM_LEN], ymm6); const __m256 ymm7(_mm256_loadu_ps((float*)&src[7 * YMM_LEN])); _mm256_stream_ps((float*)&dst[7 * YMM_LEN], ymm7); _mm_sfence(); return; } else if (_nelems <= MEMMOVE_128ELEMS) { #if (GMS_MAN_PREFETCH) == _mm_prefetch((const char*)&src[0], _MM_HINT_T0); 
_mm_prefetch((const char*)&src[2 * YMM_LEN], _MM_HINT_T0); _mm_prefetch((const char*)&src[4 * YMM_LEN], _MM_HINT_T0); _mm_prefetch((const char*)&src[6 * YMM_LEN], _MM_HINT_T0); _mm_prefetch((const char*)&src[8 * YMM_LEN], _MM_HINT_T0); #endif const __m256 ymm0(_mm256_loadu_ps((float*)&src[0])); _mm256_stream_ps((float*)&dst[0], ymm0); const __m256 ymm1(_mm256_loadu_ps((float*)&src[1 * YMM_LEN])); _mm256_stream_ps((float*)&dst[1 * YMM_LEN], ymm1); const __m256 ymm2(_mm256_loadu_ps((float*)&src[2 * YMM_LEN])); _mm256_stream_ps((float*)&dst[2 * YMM_LEN], ymm2); const __m256 ymm3(_mm256_loadu_ps((float*)&src[3 * YMM_LEN])); _mm256_stream_ps((float*)&dst[3 * YMM_LEN], ymm3); const __m256 ymm4(_mm256_loadu_ps((float*)&src[4 * YMM_LEN])); _mm256_stream_ps((float*)&dst[4 * YMM_LEN], ymm4); const __m256 ymm5(_mm256_loadu_ps((float*)&src[5 * YMM_LEN])); _mm256_stream_ps((float*)&dst[5 * YMM_LEN], ymm5); const __m256 ymm6(_mm256_loadu_ps((float*)&src[6 * YMM_LEN])); _mm256_stream_ps((float*)&dst[6 * YMM_LEN], ymm6); const __m256 ymm7(_mm256_loadu_ps((float*)&src[7 * YMM_LEN])); _mm256_stream_ps((float*)&dst[7 * YMM_LEN], ymm7); const __m256 ymm8(_mm256_loadu_ps((float*)&src[8 * YMM_LEN])); _mm256_stream_ps((float*)&dst[8 * YMM_LEN], ymm8); const __m256 ymm9(_mm256_loadu_ps((float*)&src[9 * YMM_LEN])); _mm256_stream_ps((float*)&dst[9 * YMM_LEN], ymm9); const __m256 ymm10(_mm256_loadu_ps((float*)&src[10 * YMM_LEN])); _mm256_stream_ps((float*)&dst[10 * YMM_LEN], ymm10); const __m256 ymm11(_mm256_loadu_ps((float*)&src[11 * YMM_LEN])); _mm256_stream_ps((float*)&dst[11 * YMM_LEN], ymm11); const __m256 ymm12(_mm256_loadu_ps((float*)&src[12 * YMM_LEN])); _mm256_stream_ps((float*)&dst[12 * YMM_LEN], ymm12); const __m256 ymm13(_mm256_loadu_ps((float*)&src[13 * YMM_LEN])); _mm256_stream_ps((float*)&dst[13 * YMM_LEN], ymm13); const __m256 ymm14(_mm256_loadu_ps((float*)&src[14 * YMM_LEN])); _mm256_stream_ps((float*)&dst[14 * YMM_LEN], ymm14); const __m256 
ymm15(_mm256_loadu_ps((float*)&src[15 * YMM_LEN])); _mm256_stream_ps((float*)&dst[15 * YMM_LEN], ymm15); _mm_sfence(); return; } else if (_nelems <= MAXFLOATSPERPAGE4KiB){ int32_t i; #if defined __ICC || defined __INTEL_COMPILER #pragma loop_count(1024) #endif for (i = 0; i != ROUND_TO_EIGHT(_nelems, 8); i += 64) { _mm_prefetch((const char*)&src[i], _MM_HINT_T0); const __m256 ymm0(_mm256_loadu_ps((float*)&src[i + 0])); _mm256_stream_ps((float*)&dst[i + 0], ymm0); const __m256 ymm1(_mm256_loadu_ps((float*)&src[i + 1 * YMM_LEN])); _mm256_stream_ps((float*)&dst[i + 1 * YMM_LEN], ymm1); const __m256 ymm2(_mm256_loadu_ps((float*)&src[i + 2 * YMM_LEN])); _mm256_stream_ps((float*)&dst[i + 2 * YMM_LEN], ymm2); const __m256 ymm3(_mm256_loadu_ps((float*)&src[i + 3 * YMM_LEN])); _mm256_stream_ps((float*)&dst[i + 3 * YMM_LEN], ymm3); const __m256 ymm4(_mm256_loadu_ps((float*)&src[i + 4 * YMM_LEN])); _mm256_stream_ps((float*)&dst[i + 4 * YMM_LEN], ymm4); const __m256 ymm5(_mm256_loadu_ps((float*)&src[i + 5 * YMM_LEN])); _mm256_stream_ps((float*)&dst[i + 5 * YMM_LEN], ymm5); const __m256 ymm6(_mm256_loadu_ps((float*)&src[i + 6 * YMM_LEN])); _mm256_stream_ps((float*)&dst[i + 6 * YMM_LEN], ymm6); const __m256 ymm7(_mm256_loadu_ps((float*)&src[i + 7 * YMM_LEN])); _mm256_stream_ps((float*)&dst[i + 7 * YMM_LEN], ymm7); } _mm_sfence(); #if defined __ICC || defined __INTEL_COMPILER #pragma loop_count min(1),avg(4),max(7) #endif for (; i != _nelems; ++i) { dst[i] = src[i]; } return; } else if (_nelems > MAXFLOATSPERPAGE4KiB) { int32_t j; for (int32_t k = 0; k != _nelems; k += MAXFLOATSPERPAGE4KiB) { volatile float t = src[k + MAXFLOATSPERPAGE4KiB]; for (j = k + 128; j != k + MAXFLOATSPERPAGE4KiB; j += 64) { _mm_prefetch((const char*)&src[j], _MM_HINT_T0); } for (j = k; j != k + MAXFLOATSPERPAGE4KiB; j += 64) { const __m256 ymm0(_mm256_loadu_ps((float*)&src[j + 0])); _mm256_stream_ps((float*)&dst[j + 0], ymm0); const __m256 ymm1(_mm256_loadu_ps((float*)&src[j + 1 * YMM_LEN])); 
_mm256_stream_ps((float*)&dst[j + 1 * YMM_LEN], ymm1); const __m256 ymm2(_mm256_loadu_ps((float*)&src[j + 2 * YMM_LEN])); _mm256_stream_ps((float*)&dst[j + 2 * YMM_LEN], ymm2); const __m256 ymm3(_mm256_loadu_ps((float*)&src[j + 3 * YMM_LEN])); _mm256_stream_ps((float*)&dst[j + 3 * YMM_LEN], ymm3); const __m256 ymm4(_mm256_loadu_ps((float*)&src[j + 4 * YMM_LEN])); _mm256_stream_ps((float*)&dst[j + 4 * YMM_LEN], ymm4); const __m256 ymm5(_mm256_loadu_ps((float*)&src[j + 5 * YMM_LEN])); _mm256_stream_ps((float*)&dst[j + 5 * YMM_LEN], ymm5); const __m256 ymm6(_mm256_loadu_ps((float*)&src[j + 6 * YMM_LEN])); _mm256_stream_ps((float*)&dst[j + 6 * YMM_LEN], ymm6); const __m256 ymm7(_mm256_loadu_ps((float*)&src[j + 7 * YMM_LEN])); _mm256_stream_ps((float*)&dst[j + 7 * YMM_LEN], ymm7); } } _mm_sfence(); return; } } #if defined __AVX512F__ __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx512_cached_memmove(void * __restrict _Dst, const void * __restrict _Src, const int32_t _nelems) { if (MEMMOVE_1ELEM <= _nelems) { return; } char * __restrict dst = (char *)_Dst; const char * __restrict src = (char *)_Src; if (_nelems <= MEMMOVE_16ELEMS) { const __m512 zmm0(_mm512_loadu_ps((float*)&src[0])); _mm512_storeu_ps((float*)&dst[0],zmm0); return; } else if ( _nelems <= MEMMOVE_32ELEMS) { const __m512 zmm0(_mm512_loadu_ps((float*)&src[0])); _mm512_storeu_ps((float*)&dst[0], zmm0); const __m512 zmm1(_mm512_loadu_ps((float*)&src[1*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[1*ZMM_LEN], zmm1); return; } else if ( _nelems <= MEMMOVE_64ELEMS) { const __m512 zmm0(_mm512_loadu_ps((float*)&src[0])); _mm512_storeu_ps((float*)&dst[0],zmm0); const __m512 zmm1(_mm512_loadu_ps((float*)&src[1*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[1*ZMM_LEN],zmm1); const __m512 zmm2(_mm512_loadu_ps((float*)&src[2*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[2*ZMM_LEN],zmm2); const __m512 zmm3(_mm512_loadu_ps((float*)&src[3*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[3*ZMM_LEN],zmm3); return; 
} else if ( _nelems <= MEMMOVE_128ELEMS) { const __m512 zmm0(_mm512_loadu_ps((float*)&src[0])); _mm512_storeu_ps((float*)&dst[0], zmm0); const __m512 zmm1(_mm512_loadu_ps((float*)&src[1*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[1*ZMM_LEN], zmm1); const __m512 zmm2(_mm512_loadu_ps((float*)&src[2*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[2*ZMM_LEN], zmm2); const __m512 zmm3(_mm512_loadu_ps((float*)&src[3*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[3*ZMM_LEN], zmm3); const __m512 zmm4(_mm512_loadu_ps((float*)&src[4*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[4*ZMM_LEN], zmm4); const __m512 zmm5(_mm512_loadu_ps((float*)&src[5*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[5*ZMM_LEN], zmm5); const __m512 zmm6(_mm512_loadu_ps((float*)&src[6*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[6*ZMM_LEN], zmm6); const __m512 zmm7(_mm512_loadu_ps((float*)&src[7*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[7*ZMM_LEN], zmm7); return; } else if ( _nelems <= MEMMOVE_256ELEMS) { #if (GMS_MAN_PREFETCH) _mm_prefetch((const char *)&src[0], _MM_HINT_T0); _mm_prefetch((const char *)&src[1*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[2*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[3*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[4*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[5*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[6*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[7*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[8*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[9*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[10*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[11*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[12*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[13*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[14*ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[15*ZMM_LEN], _MM_HINT_T0); #endif const __m512 zmm0(_mm512_loadu_ps((float*)&src[0])); 
_mm512_storeu_ps((float*)&dst[0], zmm0); const __m512 zmm1(_mm512_loadu_ps((float*)&src[1*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[1*ZMM_LEN], zmm1); const __m512 zmm2(_mm512_loadu_ps((float*)&src[2*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[2*ZMM_LEN], zmm2); const __m512 zmm3(_mm512_loadu_ps((float*)&src[3*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[3*ZMM_LEN], zmm3); const __m512 zmm4(_mm512_loadu_ps((float*)&src[4*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[4*ZMM_LEN], zmm4); const __m512 zmm5(_mm512_loadu_ps((float*)&src[5*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[5*ZMM_LEN], zmm5); const __m512 zmm6(_mm512_loadu_ps((float*)&src[6*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[6*ZMM_LEN], zmm6); const __m512 zmm7(_mm512_loadu_ps((float*)&src[7*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[7*ZMM_LEN], zmm7); const __m512 zmm8(_mm512_loadu_ps((float*)&src[8*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[8*ZMM_LEN], zmm8); const __m512 zmm9(_mm512_loadu_ps((float*)&src[9*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[9*ZMM_LEN], zmm9); const __m512 zmm10(_mm512_loadu_ps((float*)&src[10*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[10*ZMM_LEN], zmm10); const __m512 zmm11(_mm512_loadu_ps((float*)&src[11*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[11*ZMM_LEN], zmm11); const __m512 zmm12(_mm512_loadu_ps((float*)&src[12*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[12*ZMM_LEN], zmm12); const __m512 zmm13(_mm512_loadu_ps((float*)&src[13*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[13*ZMM_LEN], zmm13); const __m512 zmm14(_mm512_loadu_ps((float*)&src[14*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[14*ZMM_LEN], zmm14); const __m512 zmm15(_mm512_loadu_ps((float*)&src[15*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[15*ZMM_LEN], zmm15); return; } else if ( _nelems <= PAGE4KiB) { int32_t i; #if defined __ICC || defined __INTEL_COMPILER #pragma loop_count(1024) #endif for (i = 0; i != ROUND_TO_SIXTEEN(_nelems,16); i += 128) { _mm_prefetch((const char *)&src[i+0], _MM_HINT_T0); const __m512 
zmm0(_mm512_loadu_ps((float*)&src[i+0])); _mm512_storeu_ps((float*)&dst[i+0], zmm0); const __m512 zmm1(_mm512_loadu_ps((float*)&src[i+1*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[i+1*ZMM_LEN], zmm1); const __m512 zmm2(_mm512_loadu_ps((float*)&src[i+2*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[i+2*ZMM_LEN], zmm2); const __m512 zmm3(_mm512_loadu_ps((float*)&src[i+3*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[i+3*ZMM_LEN], zmm3); const __m512 zmm4(_mm512_loadu_ps((float*)&src[i+4*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[i+4*ZMM_LEN], zmm4); const __m512 zmm5(_mm512_loadu_ps((float*)&src[i+5*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[i+5*ZMM_LEN], zmm5); const __m512 zmm6(_mm512_loadu_ps((float*)&src[i+6*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[i+6*ZMM_LEN], zmm6); const __m512 zmm7(_mm512_loadu_ps((float*)&src[i+7*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[i+7*ZMM_LEN], zmm7); } #if defined __ICC || defined __INTEL_COMPILER #pragma loop_count min(1),avg(8),max(15) #endif for (; i != _nelems; ++i) { dst[i] = src[i]; } return; } else if (_nelems > MAXFLOATSPERPAGE4KiB) { int32_t j; for (int32_t k = 0; k != _nelems; k += MAXFLOATSPERPAGE4KiB) { volatile float t = src[k + MAXFLOATSPERPAGE4KiB]; for ( j = k + 128; j != k + MAXFLOATSPERPAGE4KiB; j += 128) { _mm_prefetch((const char*)&src[j], _MM_HINT_T0); } for (j = k; j != k + MAXFLOATSPERPAGE4KiB; j += 128) { const __m512 zmm0(_mm512_loadu_ps((float*)&src[j+0])); _mm512_storeu_ps((float*)&dst[j+0], zmm0); const __m512 zmm1(_mm512_loadu_ps((float*)&src[j+1*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[j+1*ZMM_LEN], zmm1); const __m512 zmm2(_mm512_loadu_ps((float*)&src[j+2*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[j+2*ZMM_LEN], zmm2); const __m512 zmm3(_mm512_loadu_ps((float*)&src[j+3*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[j+3*ZMM_LEN], zmm3); const __m512 zmm4(_mm512_loadu_ps((float*)&src[j+4*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[j+4*ZMM_LEN], zmm4); const __m512 zmm5(_mm512_loadu_ps((float*)&src[j+5*ZMM_LEN])); 
_mm512_storeu_ps((float*)&dst[j+5*ZMM_LEN], zmm5); const __m512 zmm6(_mm512_loadu_ps((float*)&src[j+6*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[j+6*ZMM_LEN], zmm6); const __m512 zmm7(_mm512_loadu_ps((float*)&src[j+7*ZMM_LEN])); _mm512_storeu_ps((float*)&dst[j+7*ZMM_LEN], zmm7); } } return; } } __ATTR_ALWAYS_INLINE__ __ATTR_HOT__ __ATTR_ALIGN__(32) static inline void avx512_uncached_memmove(void * __restrict _Dst, const void * __restrict _Src, const int32_t _nelems) { if (MEMMOVE_1ELEM <= _nelems) { return; } char * __restrict dst = (char*)_Dst; const char * __restrict src = (char*)_Src; uintptr_t dst_val = (uintptr_t)dst; int32_t misalign = 0; int32_t nbytes = 4*_nelems; if (dst_val & 0x3F) { misalign = min_val(0x40 - (dst_val & 0x3F), nbytes); dst += misalign; dst_val += misalign; nbytes -= misalign; } if (_nelems <= MEMMOVE_16ELEMS) { const __m512 zmm0(_mm512_loadu_ps((float*)&src[0])); _mm512_stream_ps((float*)&dst[0], zmm0); _mm_sfence(); return; } else if (_nelems <= MEMMOVE_32ELEMS) { const __m512 zmm0(_mm512_loadu_ps((float*)&src[0])); _mm512_stream_ps((float*)&dst[0], zmm0); const __m512 zmm1(_mm512_loadu_ps((float*)&src[1 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[1 * ZMM_LEN], zmm1); _mm_sfence(); return; } else if (_nelems <= MEMMOVE_64ELEMS) { const __m512 zmm0(_mm512_loadu_ps((float*)&src[0])); _mm512_stream_ps((float*)&dst[0], zmm0); const __m512 zmm1(_mm512_loadu_ps((float*)&src[1 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[1 * ZMM_LEN], zmm1); const __m512 zmm2(_mm512_loadu_ps((float*)&src[2 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[2 * ZMM_LEN], zmm2); const __m512 zmm3(_mm512_loadu_ps((float*)&src[3 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[3 * ZMM_LEN], zmm3); _mm_sfence(); return; } else if (_nelems <= MEMMOVE_128ELEMS) { const __m512 zmm0(_mm512_loadu_ps((float*)&src[0])); _mm512_stream_ps((float*)&dst[0], zmm0); const __m512 zmm1(_mm512_loadu_ps((float*)&src[1 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[1 * ZMM_LEN], zmm1); const __m512 
zmm2(_mm512_loadu_ps((float*)&src[2 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[2 * ZMM_LEN], zmm2); const __m512 zmm3(_mm512_loadu_ps((float*)&src[3 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[3 * ZMM_LEN], zmm3); const __m512 zmm4(_mm512_loadu_ps((float*)&src[4 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[4 * ZMM_LEN], zmm4); const __m512 zmm5(_mm512_loadu_ps((float*)&src[5 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[5 * ZMM_LEN], zmm5); const __m512 zmm6(_mm512_loadu_ps((float*)&src[6 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[6 * ZMM_LEN], zmm6); const __m512 zmm7(_mm512_loadu_ps((float*)&src[7 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[7 * ZMM_LEN], zmm7); _mm_sfence(); return; } else if (_nelems <= MEMMOVE_256ELEMS) { #if (GMS_MAN_PREFETCH) == 1 _mm_prefetch((const char *)&src[0], _MM_HINT_T0); _mm_prefetch((const char *)&src[1 * ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[2 * ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[3 * ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[4 * ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[5 * ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[6 * ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[7 * ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[8 * ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[9 * ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[10 * ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[11 * ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[12 * ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[13 * ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[14 * ZMM_LEN], _MM_HINT_T0); _mm_prefetch((const char *)&src[15 * ZMM_LEN], _MM_HINT_T0); #endif const __m512 zmm0(_mm512_loadu_ps((float*)&src[0])); _mm512_stream_ps((float*)&dst[0], zmm0); const __m512 zmm1(_mm512_loadu_ps((float*)&src[1 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[1 * ZMM_LEN], zmm1); const __m512 zmm2(_mm512_loadu_ps((float*)&src[2 * ZMM_LEN])); 
_mm512_stream_ps((float*)&dst[2 * ZMM_LEN], zmm2); const __m512 zmm3(_mm512_loadu_ps((float*)&src[3 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[3 * ZMM_LEN], zmm3); const __m512 zmm4(_mm512_loadu_ps((float*)&src[4 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[4 * ZMM_LEN], zmm4); const __m512 zmm5(_mm512_loadu_ps((float*)&src[5 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[5 * ZMM_LEN], zmm5); const __m512 zmm6(_mm512_loadu_ps((float*)&src[6 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[6 * ZMM_LEN], zmm6); const __m512 zmm7(_mm512_loadu_ps((float*)&src[7 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[7 * ZMM_LEN], zmm7); const __m512 zmm8(_mm512_loadu_ps((float*)&src[8 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[8 * ZMM_LEN], zmm8); const __m512 zmm9(_mm512_loadu_ps((float*)&src[9 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[9 * ZMM_LEN], zmm9); const __m512 zmm10(_mm512_loadu_ps((float*)&src[10 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[10 * ZMM_LEN], zmm10); const __m512 zmm11(_mm512_loadu_ps((float*)&src[11 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[11 * ZMM_LEN], zmm11); const __m512 zmm12(_mm512_loadu_ps((float*)&src[12 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[12 * ZMM_LEN], zmm12); const __m512 zmm13(_mm512_loadu_ps((float*)&src[13 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[13 * ZMM_LEN], zmm13); const __m512 zmm14(_mm512_loadu_ps((float*)&src[14 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[14 * ZMM_LEN], zmm14); const __m512 zmm15(_mm512_loadu_ps((float*)&src[15 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[15 * ZMM_LEN], zmm15); _mm_sfence(); return; } else if (_nelems <= PAGE4KiB) { int32_t i; #if defined __ICC || defined __INTEL_COMPILER #pragma loop_count(1024) #endif for (i = 0; i != ROUND_TO_SIXTEEN(_nelems, 16); i += 128) { _mm_prefetch((const char *)&src[i + 0], _MM_HINT_T0); const __m512 zmm0(_mm512_loadu_ps((float*)&src[i + 0])); _mm512_stream_ps((float*)&dst[i + 0], zmm0); const __m512 zmm1(_mm512_loadu_ps((float*)&src[i + 1 * ZMM_LEN])); 
_mm512_stream_ps((float*)&dst[i + 1 * ZMM_LEN], zmm1); const __m512 zmm2(_mm512_loadu_ps((float*)&src[i + 2 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[i + 2 * ZMM_LEN], zmm2); const __m512 zmm3(_mm512_loadu_ps((float*)&src[i + 3 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[i + 3 * ZMM_LEN], zmm3); const __m512 zmm4(_mm512_loadu_ps((float*)&src[i + 4 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[i + 4 * ZMM_LEN], zmm4); const __m512 zmm5(_mm512_loadu_ps((float*)&src[i + 5 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[i + 5 * ZMM_LEN], zmm5); const __m512 zmm6(_mm512_loadu_ps((float*)&src[i + 6 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[i + 6 * ZMM_LEN], zmm6); const __m512 zmm7(_mm512_loadu_ps((float*)&src[i + 7 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[i + 7 * ZMM_LEN], zmm7); } _mm_sfence(); #if defined __ICC || defined __INTEL_COMPILER #pragma loop_count min(1),avg(8),max(15) #endif for (; i != _nelems; ++i) { dst[i] = src[i]; } return; } else if (_nelems > MAXFLOATSPERPAGE4KiB) { int32_t j; for (int32_t k = 0; k != _nelems; k += MAXFLOATSPERPAGE4KiB) { volatile float t = src[k + MAXFLOATSPERPAGE4KiB]; for (j = k + 128; j != k + MAXFLOATSPERPAGE4KiB; j += 128) { _mm_prefetch((const char*)&src[j], _MM_HINT_T0); } for (j = k; j != k + MAXFLOATSPERPAGE4KiB; j += 128) { const __m512 zmm0(_mm512_loadu_ps((float*)&src[j + 0])); _mm512_stream_ps((float*)&dst[j + 0], zmm0); const __m512 zmm1(_mm512_loadu_ps((float*)&src[j + 1 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[j + 1 * ZMM_LEN], zmm1); const __m512 zmm2(_mm512_loadu_ps((float*)&src[j + 2 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[j + 2 * ZMM_LEN], zmm2); const __m512 zmm3(_mm512_loadu_ps((float*)&src[j + 3 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[j + 3 * ZMM_LEN], zmm3); const __m512 zmm4(_mm512_loadu_ps((float*)&src[j + 4 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[j + 4 * ZMM_LEN], zmm4); const __m512 zmm5(_mm512_loadu_ps((float*)&src[j + 5 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[j + 5 * ZMM_LEN], zmm5); 
const __m512 zmm6(_mm512_loadu_ps((float*)&src[j + 6 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[j + 6 * ZMM_LEN], zmm6); const __m512 zmm7(_mm512_loadu_ps((float*)&src[j + 7 * ZMM_LEN])); _mm512_stream_ps((float*)&dst[j + 7 * ZMM_LEN], zmm7); } } _mm_sfence(); return; } } #endif } // common } // gms #endif /*__GMS_SIMD_MEMOPS_H__*/
for_loop.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
#include "callback.h"
#include <omp.h>

// OMPT sync-region test: a 2-thread parallel region with a worksharing loop.
// The run is checked (via FileCheck) for the barrier begin/wait/end callback
// events emitted at (1) the implicit barrier ending the "omp for" and
// (2) the implicit barrier ending the parallel region, for both threads.
// NOTE: the "// CHECK" comments below are the test specification consumed by
// FileCheck — do not edit or reorder them.
int main() {
  int y[] = {0,1,2,3};

#pragma omp parallel num_threads(2)
  {
    //implicit barrier at end of for loop
    int i;
#pragma omp for
    for (i = 0; i < 4; i++) {
      y[i]++;
    }
    // Records the return address so codeptr_ra in the callbacks can be
    // matched against a real code location.
    print_current_address();
  }

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region_wait'

  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]

  // master thread implicit barrier at loop end
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}

  // master thread implicit barrier at parallel end
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}

  // worker thread explicit barrier
  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}

  // worker thread implicit barrier after parallel
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]

  return 0;
}
GB_unop__acosh_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__acosh_fc64_fc64
// op(A') function:  GB_unop_tran__acosh_fc64_fc64

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = cacosh (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = cacosh (x) ;

// casting (no typecast here: C and A are both GxB_FC64_t)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = cacosh (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ACOSH || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = cacosh (Ax [p]) for all entries; Ab (if non-NULL) is the
// bitmap of A, and entries with Ab [p] == 0 are skipped.
GrB_Info GB_unop_apply__acosh_fc64_fc64
(
    GxB_FC64_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = cacosh (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = cacosh (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel itself lives in GB_unop_transpose.c, which is
// specialized here via the GB_* macros defined above.
GrB_Info GB_unop_tran__acosh_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
common.h
#ifndef COMMON_H
#define COMMON_H

#include <stdio.h>    /* BUGFIX: rtclock() uses printf but <stdio.h> was never included */
#include <sys/time.h>
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif

//#define GCC_EXTENSION
#define OPENMP_3_1

// Wall-clock time in seconds, with microsecond resolution.
// 'inline' added: this header is included from multiple translation units,
// and a non-inline definition would cause multiple-definition link errors.
inline double rtclock()
{
    struct timezone Tzp;
    struct timeval Tp;
    int stat;
    stat = gettimeofday(&Tp, &Tzp);
    if (stat != 0)
        printf("Error return from gettimeofday: %d", stat);
    return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}

// Atomically performs { old = *ptr; *ptr += val; } and returns the old value.
// Falls back to a plain (non-atomic) update when OpenMP is disabled.
template <class T>
inline T my_fetch_add(T *ptr, T val)
{
#ifdef ENABLE_OPENMP
#ifdef GCC_EXTENSION
    return __sync_fetch_and_add(ptr, val);
#endif
#ifdef OPENMP_3_1
    T old;
#pragma omp atomic capture
    { old = *ptr; *ptr += val; }
    return old;
#endif
#else
    T old = *ptr;
    *ptr += val;
    return old;
#endif
}

// Atomically performs { old = *ptr; *ptr -= val; } and returns the old value.
template <class T>
inline T my_fetch_sub(T *ptr, T val)
{
#ifdef ENABLE_OPENMP
#ifdef GCC_EXTENSION
    return __sync_fetch_and_sub(ptr, val);
#endif
#ifdef OPENMP_3_1
    T old;
#pragma omp atomic capture
    { old = *ptr; *ptr -= val; }
    return old;
#endif
#else
    T old = *ptr;
    *ptr -= val;
    return old;
#endif
}

// Compare-and-swap: if *ptr == old_val, set *ptr = new_val.
// Returns the value of *ptr observed at the time of the swap attempt.
template <class T>
inline T my_compare_swap(T *ptr, T old_val, T new_val)
{
#ifdef ENABLE_OPENMP
#ifdef GCC_EXTENSION
    return __sync_val_compare_and_swap(ptr, old_val, new_val);
#endif
#ifdef OPENMP_3_1
    // BUGFIX: the snapshot of *ptr must be taken inside the critical
    // section; reading it before entering allowed another thread to
    // modify *ptr in between, so the returned "old" value could be
    // inconsistent with the compare-and-swap decision.
    T old;
#pragma omp critical
    {
        old = *ptr;
        if (*ptr == old_val)
        {
            *ptr = new_val;
        }
    }
    return old;
#endif
#else
    T old = *ptr;
    if (*ptr == old_val)
        *ptr = new_val;
    return old;
#endif
}

// Atomically lowers *ptr to val if val is smaller; returns the prior value.
// NOTE(review): the snapshot "old" is read outside the critical section, so
// under contention the returned value may be stale — confirm callers only
// rely on the min update, not on the return value.
template <class T>
inline T atomicMin(T *ptr, T val)
{
    T old = *ptr;
#ifdef ENABLE_OPENMP
#pragma omp critical
#endif
    { if (val < *ptr) *ptr = val; }
    return old;
}

// CUDA-style barrier shim: maps to an OpenMP barrier when available,
// and is a no-op in serial builds.
inline void __syncthreads()
{
#ifdef ENABLE_OPENMP
#ifdef GCC_EXTENSION
    //#pragma omp barrier
    //__sync_synchronize();
#endif
#ifdef OPENMP_3_1
#pragma omp barrier
#endif
#else
#endif
}

#endif
GB_unop__identity_fp64_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fp64_fc32)
// op(A') function:  GB (_unop_tran__identity_fp64_fc32)

// C type:   double
// A type:   GxB_FC32_t
// cast:     double cij = (double) crealf (aij)
// unaryop:  cij = aij

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity; the typecast below does the real work)
#define GB_OP(z, x) \
    z = x ;

// casting: keep only the real part of the single-precision complex value,
// widened to double
#define GB_CAST(z, aij) \
    double z = (double) crealf (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = (double) crealf (aij) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = (double) crealf (Ax [p]) for all entries; Ab (if
// non-NULL) is the bitmap of A, and entries with Ab [p] == 0 are skipped.
GrB_Info GB (_unop_apply__identity_fp64_fc32)
(
    double *Cx,                 // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            double z = (double) crealf (aij) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            double z = (double) crealf (aij) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel itself lives in GB_unop_transpose.c, which is
// specialized here via the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_fp64_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
constant_density_acoustic_time_scalar_2D_6.h
#ifndef __CDA_TIME_SCALAR_2D_6__
#define __CDA_TIME_SCALAR_2D_6__

#include <stdlib.h>

// One explicit time step of the 2D constant-density acoustic wave equation
// with split-field PML absorbing boundaries, using 6th-order centered finite
// differences (decentered one-sided stencils near the boundaries).
// Wavefields are stored row-major with x-stride nz and z-stride 1.
template< typename T, int ACCURACY >
void cda_time_scalar_2D_6(           T* km1_u,     int nr_km1_u,     int nc_km1_u,       // in  - padded wavefield shape
                                     T* k_Phix,    int nr_k_Phix,    int nc_k_Phix,      // in  - padded wavefield shape
                                     T* k_Phiz,    int nr_k_Phiz,    int nc_k_Phiz,      // in  - padded wavefield shape
                                     T* k_u,       int nr_k_u,       int nc_k_u,         // in  - padded wavefield shape
                                     T* C,         int nr_C,         int nc_C,           // in  - padded wavefield shape
                                     T* rhs,       int nr_rhs,       int nc_rhs,         // in  - padded wavefield shape
                                     T* xlpml,     int n_xlpml,                          // in  - length is the number of nodes inside the padding that the pml value is defined.
                                     T* xrpml,     int n_xrpml,                          // in  - length is the number of nodes inside the padding that the pml value is defined.
                                     T* zlpml,     int n_zlpml,                          // in  - length is the number of nodes inside the padding that the pml value is defined.
                                     T* zrpml,     int n_zrpml,                          // in  - length is the number of nodes inside the padding that the pml value is defined.
                                     double const& dt,                                   // in
                                     double const& dx,                                   // in
                                     double const& dz,                                   // in
                                     int const& nx,                                      // in
                                     int const& nz,                                      // in
                                     T* kp1_Phix,  int nr_kp1_Phix,  int nc_kp1_Phix,    // out
                                     T* kp1_Phiz,  int nr_kp1_Phiz,  int nc_kp1_Phiz,    // out
                                     T* kp1_u,     int nr_kp1_u,     int nc_kp1_u )      // out
{
    enum {MAX_FD_SHIFT = ACCURACY/2};

    T lapU = 0.0;

    // PML damping coefficients at the current node
    T sigmax = 0.0;
    T sigmaz = 0.0;

    // Time delta variables
    T dt2 = dt*dt;

    // Loop/index variables
    int idx;
    int zstride = 1;
    int xstride = nz;
    int s = zstride;
    int i, k;

    // shared space step square variable
    T dx2 = dx*dx;
    T dz2 = dz*dz;

    // private variables
    //non derivatives
    T fac1;
    T fac2;
    //derivatives
    T dux  , duz;
    T dPhix, dPhiz;

    // BUGFIX: getenv() returns NULL when OMP_NUM_THREADS is unset, and
    // atoi(NULL) is undefined behavior (typically a crash).  Fall back to a
    // single thread in that case.
    const char* NUM = getenv("OMP_NUM_THREADS");
    int Num_Th = (NUM != NULL) ? atoi(NUM) : 1;
    if (Num_Th <= 0) Num_Th = 1;

    #pragma omp parallel for private(sigmaz, sigmax, i, k, idx, dux, duz, dPhix, dPhiz, lapU, fac1, fac2) shared(dx, dx2, dz, dz2, nz, nx, kp1_Phix, kp1_Phiz, k_Phix, k_Phiz, n_zrpml, n_zlpml, n_xrpml, xrpml, xlpml, zrpml, zlpml, s, rhs, C, dt, dt2, km1_u, k_u, kp1_u) num_threads(Num_Th) collapse(2)
    for(i=0; i < nx; ++i)
    {
        for(k=0; k < nz; k++)
        {
            idx = i*xstride + k;

            kp1_Phix[idx] = 0.0;
            kp1_Phiz[idx] = 0.0;
            kp1_u[idx]    = 0.0;

            // This handles homogeneous Dirichlet BCs and non-updating in ghost regions.
            if ((i == 0) || (i == nx-1)) continue;
            if ((k == 0) || (k == nz-1)) continue;

            lapU = 0.0;

            // Do the X direction
            // (the i==0 and i==nx-1 branches below are unreachable after the
            //  Dirichlet 'continue' above, but are kept for symmetry)
            // Left side
            if (i==0)
            {
                //decentered derivative 3 ranks on the right
                dux   = ((-1./60.)*0.0+(3./20.)*0.0+(-3./4.)*0.0+0.0+(3./4.)*k_u[idx+nz]+(-3./20.)*k_u[idx+2*nz]+(1./60.)*k_u[idx+3*nz])/dx;
                dPhix = ((-1./60.)*0.0+(3./20.)*0.0+(-3./4.)*0.0+0.0+(3./4.)*k_Phix[idx+nz]+(-3./20.)*k_Phix[idx+2*nz]+(1./60.)*k_Phix[idx+3*nz])/dx;
                lapU += ((1./90.)*0.0+(-3./20.)*0.0+(3./2.)*0.0+(-49./18.)*k_u[idx]+(3./2.)*k_u[idx+nz]+(-3./20.)*k_u[idx+2*nz]+(1./90.)*k_u[idx+3*nz])/dx2;
            }
            else if (i == 1)
            {
                //decentered derivative 2 rank on the right
                dux   = ((-1./60.)*0.0+(3./20.)*0.0+(-3./4.)*k_u[idx-nz]+0.0+(3./4.)*k_u[idx+nz]+(-3./20.)*k_u[idx+2*nz]+(1./60.)*k_u[idx+3*nz])/dx;
                dPhix = ((-1./60.)*0.0+(3./20.)*0.0+(-3./4.)*k_Phix[idx-nz]+0.0+(3./4.)*k_Phix[idx+nz]+(-3./20.)*k_Phix[idx+2*nz]+(1./60.)*k_Phix[idx+3*nz])/dx;
                lapU += ((1./90.)*0.0+(-3./20.)*0.0+(3./2.)*k_u[idx-nz]+(-49./18.)*k_u[idx]+(3./2.)*k_u[idx+nz]+(-3./20.)*k_u[idx+2*nz]+(1./90.)*k_u[idx+3*nz])/dx2;
            }
            else if (i == 2)
            {
                //decentered derivative 1 rank on the right
                dux   = ((-1./60.)*0.0+(3./20.)*k_u[idx-2*nz]+(-3./4.)*k_u[idx-nz]+0.0+(3./4.)*k_u[idx+nz]+(-3./20.)*k_u[idx+2*nz]+(1./60.)*k_u[idx+3*nz])/dx;
                dPhix = ((-1./60.)*0.0+(3./20.)*k_Phix[idx-2*nz]+(-3./4.)*k_Phix[idx-nz]+0.0+(3./4.)*k_Phix[idx+nz]+(-3./20.)*k_Phix[idx+2*nz]+(1./60.)*k_Phix[idx+3*nz])/dx;
                lapU += ((1./90.)*0.0+(-3./20.)*k_u[idx-2*nz]+(3./2.)*k_u[idx-nz]+(-49./18.)*k_u[idx]+(3./2.)*k_u[idx+nz]+(-3./20.)*k_u[idx+2*nz]+(1./90.)*k_u[idx+3*nz])/dx2;
            }
            else if (i == nx-1)
            {
                //decentered derivative 3 ranks on the left
                dux   = ((-1./60.)*k_u[idx-3*nz]+(3./20.)*k_u[idx-2*nz]+(-3./4.)*k_u[idx-nz]+0.0+(3./4.)*0.0+(-3./20.)*0.0+(1./60.)*0.0)/dx;
                dPhix = ((-1./60.)*k_Phix[idx-3*nz]+(3./20.)*k_Phix[idx-2*nz]+(-3./4.)*k_Phix[idx-nz]+0.0+(3./4.)*0.0+(-3./20.)*0.0+(1./60.)*0.0)/dx;
                lapU += ((1./90.)*k_u[idx-3*nz]+(-3./20.)*k_u[idx-2*nz]+(3./2.)*k_u[idx-nz]+(-49./18.)*k_u[idx]+(3./2.)*0.0+(-3./20.)*0.0+(1./90.)*0.0)/dx2;
            }
            else if (i == nx-2)
            {
                //decentered derivative 2 ranks on the left
                dux   = ((-1./60.)*k_u[idx-3*nz]+(3./20.)*k_u[idx-2*nz]+(-3./4.)*k_u[idx-nz]+0.0+(3./4.)*k_u[idx+nz]+(-3./20.)*0.0+(1./60.)*0.0)/dx;
                dPhix = ((-1./60.)*k_Phix[idx-3*nz]+(3./20.)*k_Phix[idx-2*nz]+(-3./4.)*k_Phix[idx-nz]+0.0+(3./4.)*k_Phix[idx+nz]+(-3./20.)*0.0+(1./60.)*0.0)/dx;
                lapU += ((1./90.)*k_u[idx-3*nz]+(-3./20.)*k_u[idx-2*nz]+(3./2.)*k_u[idx-nz]+(-49./18.)*k_u[idx]+(3./2.)*k_u[idx+nz]+(-3./20.)*0.0+(1./90.)*0.0)/dx2;
            }
            else if (i == nx-3)   // BUGFIX: was 'k == nx-3' — an evident typo; all
            {                     // sibling branches of this X-direction chain test i.
                //decentered derivative 1 rank on the left
                dux   = ((-1./60.)*k_u[idx-3*nz]+(3./20.)*k_u[idx-2*nz]+(-3./4.)*k_u[idx-nz]+0.0+(3./4.)*k_u[idx+nz]+(-3./20.)*k_u[idx+2*nz]+(1./60.)*0.0)/dx;
                dPhix = ((-1./60.)*k_Phix[idx-3*nz]+(3./20.)*k_Phix[idx-2*nz]+(-3./4.)*k_Phix[idx-nz]+0.0+(3./4.)*k_Phix[idx+nz]+(-3./20.)*k_Phix[idx+2*nz]+(1./60.)*0.0)/dx;
                lapU += ((1./90.)*k_u[idx-3*nz]+(-3./20.)*k_u[idx-2*nz]+(3./2.)*k_u[idx-nz]+(-49./18.)*k_u[idx]+(3./2.)*k_u[idx+nz]+(-3./20.)*k_u[idx+2*nz]+(1./90.)*0.0)/dx2;
            }
            else
            {
                //classic centered derivative
                dux   = ((-1./60.)*k_u[idx-3*nz]+(3./20.)*k_u[idx-2*nz]+(-3./4.)*k_u[idx-nz]+0.0+(3./4.)*k_u[idx+nz]+(-3./20.)*k_u[idx+2*nz]+(1./60.)*k_u[idx+3*nz])/dx;
                dPhix = ((-1./60.)*k_Phix[idx-3*nz]+(3./20.)*k_Phix[idx-2*nz]+(-3./4.)*k_Phix[idx-nz]+0.0+(3./4.)*k_Phix[idx+nz]+(-3./20.)*k_Phix[idx+2*nz]+(1./60.)*k_Phix[idx+3*nz])/dx;
                lapU += ((1./90.)*k_u[idx-3*nz]+(-3./20.)*k_u[idx-2*nz]+(3./2.)*k_u[idx-nz]+(-49./18.)*k_u[idx]+(3./2.)*k_u[idx+nz]+(-3./20.)*k_u[idx+2*nz]+(1./90.)*k_u[idx+3*nz])/dx2;
            }

            // Do the Z direction
            // Left side
            if (k==0)
            {
                //decentered derivative 3 ranks on the right
                duz   = ((-1./60.)*0.0+(3./20.)*0.0+(-3./4.)*0.0+0.0+(3./4.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./60.)*k_u[idx+3*s])/dz;
                dPhiz = ((-1./60.)*0.0+(3./20.)*0.0+(-3./4.)*0.0+0.0+(3./4.)*k_Phiz[idx+s]+(-3./20.)*k_Phiz[idx+2*s]+(1./60.)*k_Phiz[idx+3*s])/dz;
                lapU += ((1./90.)*0.0+(-3./20.)*0.0+(3./2.)*0.0+(-49./18.)*k_u[idx]+(3./2.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./90.)*k_u[idx+3*s])/dz2;
            }
            else if (k == 1)
            {
                //decentered derivative 2 rank on the right
                duz   = ((-1./60.)*0.0+(3./20.)*0.0+(-3./4.)*k_u[idx-s]+0.0+(3./4.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./60.)*k_u[idx+3*s])/dz;
                dPhiz = ((-1./60.)*0.0+(3./20.)*0.0+(-3./4.)*k_Phiz[idx-s]+0.0+(3./4.)*k_Phiz[idx+s]+(-3./20.)*k_Phiz[idx+2*s]+(1./60.)*k_Phiz[idx+3*s])/dz;
                lapU += ((1./90.)*0.0+(-3./20.)*0.0+(3./2.)*k_u[idx-s]+(-49./18.)*k_u[idx]+(3./2.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./90.)*k_u[idx+3*s])/dz2;
            }
            else if (k == 2)
            {
                //decentered derivative 1 rank on the right
                duz   = ((-1./60.)*0.0+(3./20.)*k_u[idx-2*s]+(-3./4.)*k_u[idx-s]+0.0+(3./4.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./60.)*k_u[idx+3*s])/dz;
                dPhiz = ((-1./60.)*0.0+(3./20.)*k_Phiz[idx-2*s]+(-3./4.)*k_Phiz[idx-s]+0.0+(3./4.)*k_Phiz[idx+s]+(-3./20.)*k_Phiz[idx+2*s]+(1./60.)*k_Phiz[idx+3*s])/dz;
                lapU += ((1./90.)*0.0+(-3./20.)*k_u[idx-2*s]+(3./2.)*k_u[idx-s]+(-49./18.)*k_u[idx]+(3./2.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./90.)*k_u[idx+3*s])/dz2;
            }
            else if (k == nz-1)
            {
                //decentered derivative 3 ranks on the left
                duz   = ((-1./60.)*k_u[idx-3*s]+(3./20.)*k_u[idx-2*s]+(-3./4.)*k_u[idx-s]+0.0+(3./4.)*0.0+(-3./20.)*0.0+(1./60.)*0.0)/dz;
                dPhiz = ((-1./60.)*k_Phiz[idx-3*s]+(3./20.)*k_Phiz[idx-2*s]+(-3./4.)*k_Phiz[idx-s]+0.0+(3./4.)*0.0+(-3./20.)*0.0+(1./60.)*0.0)/dz;
                lapU += ((1./90.)*k_u[idx-3*s]+(-3./20.)*k_u[idx-2*s]+(3./2.)*k_u[idx-s]+(-49./18.)*k_u[idx]+(3./2.)*0.0+(-3./20.)*0.0+(1./90.)*0.0)/dz2;
            }
            else if (k == nz-2)
            {
                //decentered derivative 2 ranks on the left
                duz   = ((-1./60.)*k_u[idx-3*s]+(3./20.)*k_u[idx-2*s]+(-3./4.)*k_u[idx-s]+0.0+(3./4.)*k_u[idx+s]+(-3./20.)*0.0+(1./60.)*0.0)/dz;
                dPhiz = ((-1./60.)*k_Phiz[idx-3*s]+(3./20.)*k_Phiz[idx-2*s]+(-3./4.)*k_Phiz[idx-s]+0.0+(3./4.)*k_Phiz[idx+s]+(-3./20.)*0.0+(1./60.)*0.0)/dz;
                lapU += ((1./90.)*k_u[idx-3*s]+(-3./20.)*k_u[idx-2*s]+(3./2.)*k_u[idx-s]+(-49./18.)*k_u[idx]+(3./2.)*k_u[idx+s]+(-3./20.)*0.0+(1./90.)*0.0)/dz2;
            }
            else if (k == nz-3)
            {
                //decentered derivative 1 rank on the left
                duz   = ((-1./60.)*k_u[idx-3*s]+(3./20.)*k_u[idx-2*s]+(-3./4.)*k_u[idx-s]+0.0+(3./4.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./60.)*0.0)/dz;
                dPhiz = ((-1./60.)*k_Phiz[idx-3*s]+(3./20.)*k_Phiz[idx-2*s]+(-3./4.)*k_Phiz[idx-s]+0.0+(3./4.)*k_Phiz[idx+s]+(-3./20.)*k_Phiz[idx+2*s]+(1./60.)*0.0)/dz;
                lapU += ((1./90.)*k_u[idx-3*s]+(-3./20.)*k_u[idx-2*s]+(3./2.)*k_u[idx-s]+(-49./18.)*k_u[idx]+(3./2.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./90.)*0.0)/dz2;
            }
            else
            {
                //classic centered derivative
                duz   = ((-1./60.)*k_u[idx-3*s]+(3./20.)*k_u[idx-2*s]+(-3./4.)*k_u[idx-s]+0.0+(3./4.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./60.)*k_u[idx+3*s])/dz;
                dPhiz = ((-1./60.)*k_Phiz[idx-3*s]+(3./20.)*k_Phiz[idx-2*s]+(-3./4.)*k_Phiz[idx-s]+0.0+(3./4.)*k_Phiz[idx+s]+(-3./20.)*k_Phiz[idx+2*s]+(1./60.)*k_Phiz[idx+3*s])/dz;
                lapU += ((1./90.)*k_u[idx-3*s]+(-3./20.)*k_u[idx-2*s]+(3./2.)*k_u[idx-s]+(-49./18.)*k_u[idx]+(3./2.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./90.)*k_u[idx+3*s])/dz2;
            }

            sigmax = 0.0;
            sigmaz = 0.0;

            // Check if in left PML-X
            if((n_xlpml>0) && (i < n_xlpml))
            {
                sigmax = xlpml[i];
            }
            // Check if in right PML-X
            else if((n_xrpml>0) && (i >= nx-n_xrpml))
            {
                sigmax = xrpml[n_xrpml-((nx-1)-i)];
            }

            // Check if in left PML-Z
            if((n_zlpml>0) && (k < n_zlpml))
            {
                sigmaz = zlpml[k];
            }
            // Check if in right PML-Z
            else if((n_zrpml>0) && (k >= nz-n_zrpml))
            {
                sigmaz = zrpml[n_zrpml-((nz-1)-k)]; // 0th element of the right pml array corresponds to n_zrpml'th node from the right boundary.
            }

            if((sigmaz != 0.0) || (sigmax != 0.0))
            {
                // Inside a PML region: update the auxiliary split fields and
                // use the damped second-order time update.
                kp1_Phix[idx] = k_Phix[idx] - dt*sigmax*k_Phix[idx] + dt*(sigmaz-sigmax)*dux;
                kp1_Phiz[idx] = k_Phiz[idx] - dt*sigmaz*k_Phiz[idx] + dt*(sigmax-sigmaz)*duz;

                fac1 = (2.0*dt2 / (2.0 + dt*(sigmax+sigmaz)));
                fac2 = (C[idx]*C[idx])*(rhs[idx]+lapU+dPhix+dPhiz)
                       - (km1_u[idx]-2.0*k_u[idx])/dt2
                       + (sigmax+sigmaz)*km1_u[idx]/(2.0*dt)
                       - (sigmax*sigmaz)*k_u[idx];
                kp1_u[idx] = fac1 * fac2;
            }
            else
            {
                // Interior: plain leapfrog update; auxiliary fields carry over.
                kp1_Phix[idx] = k_Phix[idx];
                kp1_Phiz[idx] = k_Phiz[idx];
                kp1_u[idx]    = dt2*(C[idx]*C[idx])*(rhs[idx]+lapU+dPhix+dPhiz) - (km1_u[idx]-2.0*k_u[idx]);
            }
        }
    }
};

// Thin non-template-accuracy wrapper: instantiates the kernel above with
// ACCURACY = 6 (the order this translation unit is hard-coded for).
template< typename T>
void cda_time_scalar_2D_OMP_6(       T* km1_u,     int nr_km1_u,     int nc_km1_u,       // in  - padded wavefield shape
                                     T* k_Phix,    int nr_k_Phix,    int nc_k_Phix,      // in  - padded wavefield shape
                                     T* k_Phiz,    int nr_k_Phiz,    int nc_k_Phiz,      // in  - padded wavefield shape
                                     T* k_u,       int nr_k_u,       int nc_k_u,         // in  - padded wavefield shape
                                     T* C,         int nr_C,         int nc_C,           // in  - padded wavefield shape
                                     T* rhs,       int nr_rhs,       int nc_rhs,         // in  - padded wavefield shape
                                     T* xlpml,     int n_xlpml,                          // in  - length is the number of nodes inside the padding that the pml value is defined.
                                     T* xrpml,     int n_xrpml,                          // in  - length is the number of nodes inside the padding that the pml value is defined.
                                     T* zlpml,     int n_zlpml,                          // in  - length is the number of nodes inside the padding that the pml value is defined.
                                     T* zrpml,     int n_zrpml,                          // in  - length is the number of nodes inside the padding that the pml value is defined.
                                     double const& dt,                                   // in
                                     double const& dx,                                   // in
                                     double const& dz,                                   // in
                                     int const& nx,                                      // in
                                     int const& nz,                                      // in
                                     T* kp1_Phix,  int nr_kp1_Phix,  int nc_kp1_Phix,    // out
                                     T* kp1_Phiz,  int nr_kp1_Phiz,  int nc_kp1_Phiz,    // out
                                     T* kp1_u,     int nr_kp1_u,     int nc_kp1_u )      // out
{
    cda_time_scalar_2D_6<T,6>( km1_u,    nr_km1_u,    nc_km1_u,      // in  - padded wavefield shape
                               k_Phix,   nr_k_Phix,   nc_k_Phix,     // in  - padded wavefield shape
                               k_Phiz,   nr_k_Phiz,   nc_k_Phiz,     // in  - padded wavefield shape
                               k_u,      nr_k_u,      nc_k_u,        // in  - padded wavefield shape
                               C,        nr_C,        nc_C,          // in  - padded wavefield shape
                               rhs,      nr_rhs,      nc_rhs,        // in  - padded wavefield shape
                               xlpml,    n_xlpml,                    // in  - length is the number of nodes inside the padding that the pml value is defined.
                               xrpml,    n_xrpml,                    // in  - length is the number of nodes inside the padding that the pml value is defined.
                               zlpml,    n_zlpml,                    // in  - length is the number of nodes inside the padding that the pml value is defined.
                               zrpml,    n_zrpml,                    // in  - length is the number of nodes inside the padding that the pml value is defined.
                               dt,                                   // in
                               dx,                                   // in
                               dz,                                   // in
                               nx,                                   // in
                               nz,                                   // in
                               kp1_Phix, nr_kp1_Phix, nc_kp1_Phix,   // out
                               kp1_Phiz, nr_kp1_Phiz, nc_kp1_Phiz,   // out
                               kp1_u,    nr_kp1_u,    nc_kp1_u );    // out
}

#endif
program5.5.c
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <time.h>

/*
 * Parallel odd-even transposition sort of 'array' (length n), using the
 * classic Pacheco scheme: one parallel region, with the per-phase work
 * shared via "omp for" (whose implicit barriers separate the phases).
 */
static void odd_even_sort(int *array, int n, int thread_count)
{
    int phase, i, temp;
#pragma omp parallel num_threads(thread_count) default(none) shared(array, n) private(temp, i, phase)
    for (phase = 0; phase < n; phase++) {
        if (phase % 2 == 0) {
            /* even phase: compare (0,1), (2,3), ... */
#pragma omp for
            for (i = 1; i < n; i += 2) {
                if (array[i - 1] > array[i]) {
                    temp = array[i - 1];
                    array[i - 1] = array[i];
                    array[i] = temp;
                }
            }
        } else {
            /* odd phase: compare (1,2), (3,4), ...
             * BUGFIX: bound was 'i < n', which accessed array[i + 1] ==
             * array[n] out of bounds on the last pair when i == n - 1. */
#pragma omp for
            for (i = 1; i < n - 1; i += 2) {
                if (array[i] > array[i + 1]) {
                    temp = array[i + 1];
                    array[i + 1] = array[i];
                    array[i] = temp;
                }
            }
        }
    }
}

/* Usage: prog <thread_count> <n>.  Fills an array with random ints,
 * sorts it in parallel, and prints the elapsed wall-clock time. */
int main(int argc, char *argv[])
{
    /* BUGFIX: argv[1]/argv[2] were read without checking argc. */
    if (argc < 3) {
        fprintf(stderr, "usage: %s <thread_count> <n>\n", argv[0]);
        return 1;
    }
    int thread_count = strtol(argv[1], NULL, 10);
    int n = strtol(argv[2], NULL, 10);
    if (thread_count <= 0 || n <= 0) {
        fprintf(stderr, "thread_count and n must be positive\n");
        return 1;
    }

    int *array = malloc(n * sizeof *array);  /* BUGFIX: result checked below */
    if (array == NULL) {
        fprintf(stderr, "out of memory\n");
        return 1;
    }

    srand(time(NULL));
    for (int i = 0; i < n; i++) {
        array[i] = rand() % RAND_MAX;
    }

    double start = omp_get_wtime();
    odd_even_sort(array, n, thread_count);
    double end = omp_get_wtime();

    printf("The time is %lf.\n", end - start);
    free(array);  /* BUGFIX: allocation was never released */
    return 0;
}
vector.h
/****************************************************************************************************************/ /* */ /* OpenNN: Open Neural Networks Library */ /* www.opennn.net */ /* */ /* V E C T O R C O N T A I N E R */ /* */ /* Artificial Intelligence Techniques SL */ /* artelnics@artelnics.com */ /* */ /****************************************************************************************************************/ #ifndef __VECTOR_H__ #define __VECTOR_H__ // System includes #include <algorithm> #include <cassert> #include <cmath> #include <cstdlib> #include <functional> #include <fstream> #include <iomanip> #include <iostream> #include <iterator> #include <istream> #include <map> #include <numeric> #include <ostream> #include <sstream> #include <stdexcept> #include <string> #include <vector> #include <limits> #include <climits> #include <ctime> #include <time.h> #ifdef __OPENNN_MPI__ #include <mpi.h> #endif // Eigen includes #include "../eigen/Eigen" using namespace std; namespace OpenNN { // Forward declarations template <class T> class Matrix; template <class T> T calculate_random_uniform(const T & = -1, const T & = 1); template <class T> T calculate_random_normal(const T & = 0.0, const T & = 1.0); template <class T> string write_elapsed_time(const T&); template <class T> string write_date_from_time_t(const T&); template <class T> vector<string> split_string(const T&, const char&); template <class T> void replace_substring(T& source, const T& find, const T& replace); template <class T> struct Histogram; template <class T> struct Statistics; template <class T> struct LinearRegressionParameters; template <class T> struct LogisticRegressionParameters; template <class T> struct KMeansResults; /// This template represents an array of any kind of numbers or objects. /// It inherits from the vector of the standard library, and implements /// additional utilities. 
template <typename T> class Vector : public vector<T> { public: // CONSTRUCTORS // Default constructor. explicit Vector(); // General constructor. explicit Vector(const size_t &); // Constant reference initialization constructor. explicit Vector(const size_t &, const T &); // File constructor. explicit Vector(const string &); // Sequential constructor. explicit Vector(const T &, const double &, const T &); // Input iterators constructor template <class InputIterator> explicit Vector(InputIterator, InputIterator); // Copy constructor. Vector(const vector<T> &); Vector(const Vector<T> &); // Initializer list Vector(const initializer_list<T>&); Vector(const Vector< Vector<T> >&); // DESTRUCTOR virtual ~Vector(); // OPERATORS bool operator== (const T &) const; bool operator!= (const T &) const; bool operator>(const T &) const; bool operator<(const T &) const; bool operator>= (const T &) const; bool operator<= (const T &) const; // METHODS // Get methods // Set methods void set(); void set(const size_t &); void set(const size_t &, const T &); void set(const string &); void set(const T &, const double &, const T &); void set(const Vector &); #ifdef __OPENNN_MPI__ void set_MPI(const MPI_Datatype); #endif T get_first() const; T get_last() const; T get_before_last() const; // Initialization methods void initialize(const T &); void initialize_first(const size_t&, const T &); void initialize_sequential(); void randomize_uniform(const double & = -1.0, const double & = 1.0); void randomize_uniform(const Vector<double>&, const Vector<double>&); void randomize_normal(const double & = 0.0, const double & = 1.0); void randomize_normal(const Vector<double> &, const Vector<double> &); void randomize_binary(const double & = 0.5, const double & = 0.5); void map(Vector<T>&, const T&, const T&); void map(Vector<T>&, Vector<T>&, const T&, const T&, const T&); void trim(); Vector<T> trimmed() const; // Checking methods bool contains(const T &) const; bool contains_greater_than(const T &) 
const; bool contains(const Vector<T> &) const; bool has_same_elements(const Vector<T>&) const; bool is_in(const T &, const T &) const; bool is_constant(const double & = 0.0) const; bool is_constant_string() const; bool is_crescent() const; bool is_decrescent() const; bool is_binary() const; bool is_binary(const Vector<size_t>&) const; bool is_integer() const; bool is_integer(const Vector<size_t>&) const; bool is_discrete(const size_t&) const; bool is_discrete(const Vector<size_t>&, const size_t&) const; bool is_positive() const; bool is_negative() const; bool check_period(const double& period) const; bool perform_Lilliefors_normality_test(const double&) const; Vector<bool> perform_Lilliefors_normality_test(const Vector<double>&) const; double calculate_normal_distribution_distance() const; double calculate_half_normal_distribution_distance() const; double calculate_uniform_distribution_distance() const; Vector<bool> perform_normality_analysis() const; double calculate_normality_parameter() const; Vector<T> calculate_variation_percentage() const; size_t perform_distribution_distance_analysis() const; size_t perform_distribution_distance_analysis_missing_values(const Vector<size_t>&) const; int get_lower_index(const size_t&, const T&) const; int get_upper_index(const size_t&, const T&) const; Vector<T> get_reverse() const; Vector<T> impute_time_series_missing_values_mean(const T&) const; // String methods void replace_substring(const string&, const string&); // Count methods size_t count_equal_to(const T&) const; double count_equal_to(const T&, const Vector<double>&) const; size_t count_equal_to(const Vector<T>&) const; size_t count_not_equal_to(const T&) const; size_t count_not_equal_to(const Vector<T>&) const; size_t count_positive() const; size_t count_negative() const; size_t count_integers(const size_t&) const; size_t count_integers_missing_values(const Vector<size_t>&, const size_t&) const; Vector<size_t> get_indices_equal_to(const T &) const; Vector<size_t> 
get_indices_less_than(const T &) const; Vector<size_t> get_indices_greater_than(const T &) const; size_t count_greater_than(const T &) const; size_t count_less_than(const T &) const; size_t count_greater_equal_to(const T &) const; size_t count_less_equal_to(const T &) const; size_t count_between(const T &, const T &) const; Matrix<T> count_daily_series_occurrences() const; Matrix<T> count_weekly_series_occurrences() const; Matrix<T> count_monthly_series_occurrences() const; Matrix<T> count_yearly_series_occurrences() const; Matrix<T> count_monthly_series_occurrences(const size_t&, const size_t&, const size_t&, const size_t&) const; Matrix<T> count_monthly_occurrences() const; size_t count_date_occurrences(const size_t&) const; size_t count_month_occurrences(const size_t&) const; size_t count_date_occurrences(const size_t&, const size_t&) const; size_t count_contains(const string&) const; Vector<T> merge(const Vector<T>&, const char&) const; // Vector<double> get_binary_vector(const Vector<T>&) const; // Matrix<T> get_binary_matrix(const char& separator = ' ') const; // Matrix<T> get_unique_binary_matrix(const char&, const Vector<T>&) const; Vector<T> filter_equal_to(const T&) const; Vector<T> filter_not_equal_to(const T&) const; Vector<T> filter_equal_to(const Vector<T>&) const; Vector<T> filter_not_equal_to(const Vector<T>&) const; Vector<T> filter_numbers() const; Vector<T> filter_not_numbers() const; Vector<T> get_positive_elements() const; Vector<size_t> calculate_between_indices(const T&, const T&) const; Vector<size_t> calculate_equal_to_indices(const T &) const; Vector<size_t> calculate_equal_to_indices(const Vector<T>&) const; Vector<size_t> calculate_not_equal_to_indices(const T &) const; Vector<size_t> calculate_not_equal_to_indices(const Vector<T> &) const; Vector<T> filter_minimum_maximum(const T&, const T&) const; Vector<size_t> calculate_contains_indices(const string&) const; Vector<size_t> calculate_less_than_indices(const T &) const; Vector<size_t> 
calculate_greater_than_indices(const T &) const; Vector<size_t> calculate_less_equal_to_indices(const T &) const; Vector<size_t> calculate_greater_equal_to_indices(const T &) const; Vector<size_t> calculate_total_frequencies(const Vector< Histogram<T> > &) const; Vector<size_t> calculate_total_frequencies_missing_values(const Vector<size_t> &, const Vector< Histogram<T> > &) const; Vector<double> perform_Box_Cox_transformation(const double& = 1) const; Vector<double> calculate_percentage(const size_t&) const; double calculate_error(const Vector<T>&) const; // Statistics methods T calculate_minimum() const; T calculate_maximum() const; Vector<T> calculate_minimum_maximum() const; T calculate_minimum_missing_values(const Vector<size_t> &) const; T calculate_maximum_missing_values(const Vector<size_t> &) const; Vector<T> calculate_minimum_maximum_missing_values(const Vector<size_t> &) const; Vector<T> calculate_explained_variance() const; // Histogram methods Histogram<T> calculate_histogram(const size_t & = 10) const; Histogram<T> calculate_histogram_binary() const; Histogram<T> calculate_histogram_integers(const size_t & = 10) const; Histogram<T> calculate_histogram_missing_values(const Vector<size_t> &, const size_t & = 10) const; Histogram<T> calculate_histogram_binary_missing_values(const Vector<size_t> &) const; Histogram<T> calculate_histogram_integers_missing_values(const Vector<size_t> &, const size_t & = 10) const; Vector<T> calculate_moving_average(const T&) const; Vector<T> calculate_moving_average_cyclic(const T&) const; Vector<double> calculate_simple_moving_average(const size_t&) const; Vector<double> calculate_exponential_moving_average(const size_t&) const; double calculate_last_exponential_moving_average(const size_t&) const; Vector<double> calculate_exponential_moving_average_with_initial_average(const size_t&) const; Vector<size_t> calculate_bars_chart() const; size_t get_first_index(const T&) const; size_t calculate_minimal_index() const; size_t 
calculate_maximal_index() const; Vector<size_t> calculate_minimal_indices(const size_t &) const; Vector<size_t> calculate_k_minimal_indices(const size_t &) const; Vector<size_t> calculate_maximal_indices(const size_t &) const; Vector<size_t> calculate_minimal_maximal_index() const; Vector<T> calculate_pow(const T &) const; Vector<T> calculate_competitive() const; Vector<T> calculate_softmax() const; Matrix<T> calculate_softmax_Jacobian() const; Vector<bool> calculate_binary() const; Vector<T> calculate_square_root_elements() const; Vector<T> calculate_cumulative() const; size_t calculate_cumulative_index(const T &) const; size_t calculate_closest_index(const T &) const; T calculate_sum() const; Vector<T> calculate_sum_gradient() const; Matrix<T> calculate_sum_Hessian() const; T calculate_partial_sum(const Vector<size_t> &) const; T calculate_sum_missing_values(const Vector<size_t> &) const; T calculate_product() const; double calculate_mean() const; double calculate_mean(const size_t&, const size_t&) const; double calculate_linear_trend() const; double calculate_linear_trend(const size_t&, const size_t&) const; double calculate_percentage_of_variation() const; Vector<double> calculate_percentage_of_variation(const size_t&) const; double calculate_last_percentage_of_variation(const size_t&) const; T calculate_mode() const; T calculate_mode_missing_values(const Vector<size_t>&) const; double calculate_variance() const; double calculate_covariance(const Vector<double>&) const; double calculate_standard_deviation() const; Vector<double> calculate_standard_deviation(const size_t&) const; double calculate_asymmetry() const; double calculate_kurtosis() const; double calculate_median() const; Vector<double> calculate_quartiles() const; Vector<double> calculate_quartiles_missing_values(const Vector<size_t> &) const; Vector<double> calculate_percentiles() const; Vector<double> calculate_mean_standard_deviation() const; double calculate_mean_missing_values(const 
Vector<size_t> &) const; double calculate_variance_missing_values(const Vector<size_t> &) const; double calculate_weighted_mean(const Vector<double> &) const; double calculate_standard_deviation_missing_values(const Vector<size_t> &) const; double calculate_asymmetry_missing_values(const Vector<size_t> &) const; double calculate_kurtosis_missing_values(const Vector<size_t> &) const; Statistics<T> calculate_statistics() const; Statistics<T> calculate_statistics_missing_values(const Vector<size_t> &) const; Vector<double> calculate_shape_parameters() const; Vector<double> calculate_shape_parameters_missing_values(const Vector<size_t> &) const; Vector<double> calculate_box_plot() const; Vector<double> calculate_box_plot_missing_values(const Vector<size_t> &) const; size_t calculate_sample_index_proportional_probability() const; // Norm methods double calculate_L1_norm() const; Vector<T> calculate_sign() const; Vector<T> calculate_L1_norm_gradient() const; Matrix<T> calculate_L1_norm_Hessian() const; double calculate_L2_norm() const; Vector<T> calculate_L2_norm_gradient() const; Matrix<T> calculate_L2_norm_Hessian() const; double calculate_Lp_norm(const double &) const; Vector<double> calculate_Lp_norm_gradient(const double &) const; Vector<T> calculate_normalized() const; //double calculate_distance(const Vector<double> &) const; double calculate_euclidean_distance(const Vector<T> &) const; double calculate_euclidean_weighted_distance(const Vector<T>&, const Vector<double>&) const; double calculate_manhattan_distance(const Vector<T> &) const; double calculate_manhattan_weighted_distance(const Vector<T>&, const Vector<double>&) const; double calculate_sum_squared_error(const Vector<double> &) const; double calculate_sum_squared_error(const Matrix<T> &, const size_t &, const Vector<size_t> &) const; double calculate_Minkowski_error(const Vector<double> &, const double &) const; LinearRegressionParameters<T> calculate_linear_regression_parameters(const Vector<T> &) 
const; Vector<T> calculate_absolute_value() const; void apply_absolute_value(); // Bounding methods Vector<T> calculate_lower_bounded(const T &) const; Vector<T> calculate_lower_bounded(const Vector<T> &) const; Vector<T> calculate_upper_bounded(const T &) const; Vector<T> calculate_upper_bounded(const Vector<T> &) const; Vector<T> calculate_lower_upper_bounded(const T &, const T &) const; Vector<T> calculate_lower_upper_bounded(const Vector<T> &, const Vector<T> &) const; void apply_lower_bound(const T &); void apply_lower_bound(const Vector<T> &); void apply_upper_bound(const T &); void apply_upper_bound(const Vector<T> &); void apply_lower_upper_bounds(const T &, const T &); void apply_lower_upper_bounds(const Vector<T> &, const Vector<T> &); // Rank methods Vector<size_t> sort_ascending_indices() const; Vector<T> sort_ascending_values() const; Vector<size_t> calculate_lower_indices(const size_t&) const; Vector<T> calculate_lower_values(const size_t&) const; Vector<size_t> sort_descending_indices() const; Vector<T> sort_descending_values() const; Vector<size_t> calculate_less_rank() const; Vector<double> calculate_less_rank_with_ties() const; Vector<size_t> calculate_greater_rank() const; Vector<size_t> calculate_greater_indices() const; Vector<T> sort_rank(const Vector<size_t>&) const; // Mathematical operators inline Vector<T> operator= (const initializer_list<T> &) const; inline Vector<T> operator+ (const T &) const; inline Vector<T> operator+ (const Vector<T> &) const; inline Vector<T> operator-(const T &) const; inline Vector<T> operator-(const Vector<T> &) const; inline Vector<T> operator*(const T &) const; inline Vector<T> operator*(const Vector<T> &) const; inline Matrix<T> operator*(const Matrix<T> &) const; inline double dot(const Vector<double> &) const; Vector<double> dot(const Matrix<T> &) const; Matrix<T> direct(const Vector<T> &) const; Vector<T> operator/(const T &) const; Vector<T> operator/(const Vector<T> &) const; void operator+= (const T &); 
void operator+= (const Vector<T> &); void operator-= (const T &); void operator-= (const Vector<T> &); void operator*= (const T &); void operator*= (const Vector<T> &); void operator/= (const T &); void operator/= (const Vector<T> &); // Filtering methods Vector<T> filter_positive() const; Vector<T> filter_negative() const; size_t count_dates(const size_t&, const size_t&, const size_t&, const size_t&, const size_t&, const size_t&) const; Vector<size_t> filter_dates(const size_t&, const size_t&, const size_t&, const size_t&, const size_t&, const size_t&) const; Vector<size_t> calculate_Tukey_outliers(const double& = 1.5) const; Vector<size_t> calculate_Tukey_outliers_iterative(const double& = 1.5) const; Vector<size_t> calculate_histogram_outliers(const size_t&, const size_t&) const; Vector<size_t> calculate_histogram_outliers_iterative(const size_t&, const size_t&) const; // Scaling methods void scale_minimum_maximum(const T &, const T &); void scale_minimum_maximum(const Statistics<T> &); Statistics<T> scale_minimum_maximum(); void scale_mean_standard_deviation(const T &, const T &); void scale_mean_standard_deviation(const Statistics<T> &); Statistics<T> scale_mean_standard_deviation(); void scale_standard_deviation(const T &); void scale_standard_deviation(const Statistics<T> &); Statistics<T> scale_standard_deviation(); void scale_standard_deviation(const Vector<T> &); Vector<T> calculate_scaled_minimum_maximum() const; Vector<T> calculate_scaled_minimum_maximum_0_1() const; Vector<T> calculate_scaled_minimum_maximum(const Vector<T> &, const Vector<T> &) const; Vector<T> calculate_scaled_mean_standard_deviation() const; Vector<T> calculate_scaled_mean_standard_deviation(const Vector<T> &, const Vector<T> &) const; Vector<T> calculate_scaled_standard_deviation(const Vector<T> &) const; // Unscaling methods Vector<T> calculate_unscaled_minimum_maximum(const Vector<T> &, const Vector<T> &) const; Vector<T> calculate_unscaled_mean_standard_deviation(const Vector<T> 
&, const Vector<T> &) const; void unscale_minimum_maximum(const Vector<T> &, const Vector<T> &); void unscale_mean_standard_deviation(const Vector<T> &, const Vector<T> &); Vector<T> calculate_reverse_scaling(void) const; Vector<T> calculate_scaling_between(const T &, const T &, const T &, const T &) const; // Arranging methods Matrix<T> to_diagonal_matrix() const; Vector<T> get_subvector(const size_t&, const size_t&) const; Vector<T> get_subvector(const Vector<size_t> &) const; Vector<T> get_subvector(const Vector<bool> &) const; Vector<T> get_subvector_random(const size_t&) const; Vector<T> get_first(const size_t &) const; Vector<T> get_last(const size_t &) const; Vector<T> delete_first(const size_t &) const; Vector<T> delete_last(const size_t &) const; Vector<T> get_integer_elements(const size_t&) const; Vector<T> get_integer_elements_missing_values(const Vector<size_t>&, const size_t&) const; Matrix<T> get_power_matrix(const size_t&) const; // File operations void load(const string &); void save(const string &, const char& = ' ') const; void tuck_in(const size_t &, const Vector<T> &); Vector<T> insert_element(const size_t &, const T &) const; Vector<T> replace_element(const size_t &, const Vector<T> &) const; Vector<T> replace_value(const T&, const T&) const; Vector<T> replace_value_if_contains(const T&, const T&) const; Vector<string> split_element(const size_t &, const char&) const; Vector<T> delete_index(const size_t &) const; Vector<T> delete_indices(const Vector<size_t> &) const; Vector<T> delete_value(const T &) const; Vector<T> delete_values(const Vector<T> &) const; Vector<T> assemble(const Vector<T> &) const; static Vector<T> assemble(const Vector< Vector<T> > &); Vector<T> get_difference(const Vector<T> &) const; Vector<T> get_union(const Vector<T> &) const; Vector<T> get_intersection(const Vector<T> &) const; Vector<T> get_unique_items(const char& separator = ' ') const; Vector<T> get_unique_elements() const; Vector<T> get_unique_elements_unsorted() 
const; Vector<size_t> get_unique_elements_first_indices() const; Vector< Vector<size_t> > get_unique_elements_indices() const; Vector<size_t> count_unique() const; void print_unique() const; Vector<T> calculate_top(const size_t&) const; Matrix<T> calculate_top_matrix(const size_t&) const; Matrix<T> calculate_top_matrix_over(const size_t&, const size_t&) const; void print_top(const size_t&) const; vector<T> to_std_vector() const; Vector<double> to_double_vector() const; Vector<int> to_int_vector() const; Vector<size_t> to_size_t_vector() const; Vector<time_t> to_time_t_vector() const; Vector<bool> to_bool_vector() const; Vector<string> to_string_vector() const; Vector<double> string_to_double(const double& exception_value = 999) const; Vector<int> string_to_int(const int& exception_value = 999) const; Vector<size_t> string_to_size_t(const size_t& exception_value = 999) const; Vector<time_t> string_to_time_t(const time_t& exception_value = 999) const; Vector<time_t> www_mmm_ddd_yyyy_hh_mm_ss_to_time() const; Vector<time_t> yyyy_mm_to_time(const char& = '/') const; Vector<time_t> dd_mm_yyyy_to_time(const char& = '/') const; Vector<time_t> yyyy_mm_dd_to_time(const char& = '/') const; Matrix<T> dd_mm_yyyy_to_dd_yyyy(const char& = '/') const; Matrix<T> yyyy_mm_dd_to_dd_yyyy(const char& = '/') const; Matrix<T> mm_yyyy_to_mm_yyyy(const char& = '/') const; Vector<T> yyyy_mm_dd_to_weekday(const char& = '/') const; Vector<T> yyyy_mm_dd_to_yearday(const char& = '/') const; Vector<struct tm> time_stamp_to_time_structure() const; Vector< Vector<T> >split(const size_t&) const; Matrix<T> to_row_matrix() const; Matrix<T> to_column_matrix() const; void parse(const string &); string to_text(const char& = ',') const; string to_text(const string& = ",") const; string vector_to_string(const char&, const char&) const; string vector_to_string(const char&) const; string vector_to_string() const; string stack_vector_to_string() const; Vector<string> write_string_vector(const size_t & = 5) 
const;

  Matrix<T> to_matrix(const size_t &, const size_t &) const;

  double calculate_logistic_function(const Vector<double>&, const Vector<T>&) const;

  Vector<double> calculate_logistic_error_gradient(const Vector<double>&, const Vector<T>&) const;
};

// CONSTRUCTORS

/// Default constructor. It creates a vector of size zero.

template <class T> Vector<T>::Vector() : vector<T>() {}

/// General constructor. It creates a vector of size n, containing n copies of
/// the default value for Type. Elements are default-initialized.
/// @param new_size Size of vector.

template <class T>
Vector<T>::Vector(const size_t &new_size) : vector<T>(new_size) {}

/// Constant reference initialization constructor.
/// It creates a vector of size n, containing n copies of the type value of
/// Type.
/// @param new_size Size of Vector.
/// @param value Initialization value of Type.

template <class T>
Vector<T>::Vector(const size_t &new_size, const T &value)
    : vector<T>(new_size, value) {}

/// File constructor. It creates a vector object by loading its members from a
/// data file (delegates to load()).
/// @param file_name Name of vector data file.

template <class T>
Vector<T>::Vector(const string &file_name) : vector<T>() {
  load(file_name);
}

/// Sequential constructor. Builds the arithmetic sequence
/// first, first+step, ..., last (delegates to set(first, step, last)).

template <class T>
Vector<T>::Vector(const T &first, const double &step, const T &last)
    : vector<T>() {
  set(first, step, last);
}

/// Input iterators constructor. Copies the range [first, last).

template <class T>
template <class InputIterator>
Vector<T>::Vector(InputIterator first, InputIterator last)
    : vector<T>(first, last) {}

/// Copy constructor. It creates a copy of an existing Vector.
/// @param other_vector Vector to be copied.
template <class T>
Vector<T>::Vector(const Vector<T> &other_vector) : vector<T>(other_vector) {}

/// Conversion copy constructor from a plain std::vector.

template <class T>
Vector<T>::Vector(const vector<T> &other_vector) : vector<T>(other_vector) {}

/// Initializer-list constructor, e.g. Vector<int> v = {1, 2, 3};

template <class T>
Vector<T>::Vector(const initializer_list<T> &list) : vector<T>(list) {}

/// Flattening constructor: concatenates all the inner vectors, in order,
/// into a single vector.

template <class T>
Vector<T>::Vector(const Vector< Vector<T> >& vectors)
{
    const size_t vectors_size = vectors.size();

    // First pass: total number of elements.
    size_t new_size = 0;

    for(size_t i = 0; i < vectors_size; i++)
    {
        new_size += vectors[i].size();
    }

    set(new_size);

    // Second pass: copy each inner vector's elements sequentially.
    size_t index = 0;

    for(size_t i = 0; i < vectors_size; i++)
    {
        for(size_t j = 0; j < vectors[i].size(); j++)
        {
            (*this)[index] = vectors[i][j];
            index++;
        }
    }
}

/// Destructor. Swaps with an empty temporary so the storage is actually
/// released, not just the size reset.

template <class T> Vector<T>::~Vector() {
  vector<T>().swap(*this);
}

/// Equal to operator between this vector and a Type value.
/// It produces true if all the elements of this vector are equal to the Type
/// value, and false otherwise(true for an empty vector).
/// @param value Type value to be compared with.

template <class T> bool Vector<T>::operator== (const T &value) const {
  const size_t this_size = this->size();

  for(size_t i = 0; i < this_size; i++) {
    if((*this)[i] != value) {
      return(false);
    }
  }

  return(true);
}

/// Not equivalent relational operator between this vector and a Type value.
/// It produces true if some element of this vector is not equal to the Type
/// value, and false otherwise(false for an empty vector).
/// @param value Type value to be compared with.

template <class T> bool Vector<T>::operator!= (const T &value) const {
  const size_t this_size = this->size();

  for(size_t i = 0; i < this_size; i++) {
    if((*this)[i] != value) {
      return(true);
    }
  }

  return(false);
}

/// Greater than relational operator between this vector and a Type value.
/// It produces true if all the elements of this vector are greater than the
/// Type value, and false otherwise.
/// @param value Type value to be compared with.
template <class T> bool Vector<T>::operator>(const T &value) const {
  return std::all_of(this->begin(), this->end(),
                     [&value](const T &element) { return element > value; });
}

/// Less than relational operator between this vector and a Type value.
/// It produces true if all the elements of this vector are less than the Type
/// value, and false otherwise.
/// @param value Type value to be compared with.

template <class T> bool Vector<T>::operator<(const T &value) const {
  return std::all_of(this->begin(), this->end(),
                     [&value](const T &element) { return element < value; });
}

/// Greater than or equal to than relational operator between this vector and a
/// Type value.
/// It produces true if all the elements of this vector are greater than or
/// equal to the Type value, and false otherwise.
/// @param value Type value to be compared with.

template <class T> bool Vector<T>::operator>= (const T &value) const {
  return std::all_of(this->begin(), this->end(),
                     [&value](const T &element) { return element >= value; });
}

/// Less than or equal to than relational operator between this vector and a
/// Type value.
/// It produces true if all the elements of this vector are less than or equal
/// to the Type value, and false otherwise.
/// @param value Type value to be compared with.

template <class T> bool Vector<T>::operator<= (const T &value) const {
  return std::all_of(this->begin(), this->end(),
                     [&value](const T &element) { return element <= value; });
}

// METHODS

/// Sets the size of a vector to zero.

template <class T> void Vector<T>::set() {
  this->clear();
}

/// Sets a new size to the vector. It does not initialize the data.
/// @param new_size Size for the vector.
template <class T> void Vector<T>::set(const size_t &new_size) {
  this->resize(new_size);
}

/// Sets a new size to the vector and initializes all its elements with a given
/// value.
/// @param new_size Size for the vector.
/// @param new_value Value for all the elements.

template <class T>
void Vector<T>::set(const size_t &new_size, const T &new_value) {
  this->resize(new_size);

  initialize(new_value);
}

/// Sets all the members of a vector object by loading them from a data file.
/// The format is specified in the OpenNN manual.
/// @param file_name Name of vector data file.

template <class T> void Vector<T>::set(const string &file_name) {
  load(file_name);
}

/// Makes this vector to have elements starting from a given value, continuing
/// with a step value and finishing with a given value.
/// Depending on the starting, step and finishing values, this method can produce
/// a variety of sizes and data. An inconsistent direction (first > last with a
/// positive step, or first < last with a negative step) yields an empty vector.
/// @param first Starting value.
/// @param step Step value.
/// @param last Finishing value.

template <class T>
void Vector<T>::set(const T &first, const double &step, const T &last) {
  if(first > last && step > 0) {
    this->resize(0);
  } else if(first < last && step < 0) {
    this->resize(0);
  } else {
    // +0.5 rounds the element count to the nearest integer so that "last"
    // is included despite floating-point division error.
    const size_t new_size = 1 + static_cast<size_t>((last - first) / step + 0.5);

    this->resize(new_size);

    for(size_t i = 0; i < new_size; i++) {
      (*this)[i] = first + static_cast<T>(i * step);
    }
  }
}

/// Sets the members of this object with the values of another vector.
/// @param other_vector Object to set this vector.

template <class T> void Vector<T>::set(const Vector &other_vector) {
  *this = other_vector;
}

#ifdef __OPENNN_MPI__

// void set_MPI(const MPI_Datatype) method

/// Send the vector to the other MPI processors.
/// @param mpi_datatype MPI type of this vector.
template <class T> void Vector<T>::set_MPI(const MPI_Datatype mpi_datatype) { int size; MPI_Comm_size(MPI_COMM_WORLD, &size); int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank); int vector_size; if(rank == 0) { vector_size = static_cast<int>(this)->size(); } if(rank > 0) { MPI_Recv(&vector_size, 1, MPI_INT, rank - 1, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE); set(vector_size); MPI_Recv(data(), vector_size, mpi_datatype, rank - 1, 2, MPI_COMM_WORLD, MPI_STATUS_IGNORE); } if(rank < size - 1) { MPI_Send(&vector_size, 1, MPI_INT, rank + 1, 1, MPI_COMM_WORLD); MPI_Send(data(), vector_size, mpi_datatype, rank + 1, 2, MPI_COMM_WORLD); } MPI_Barrier(MPI_COMM_WORLD); } #endif template <class T> T Vector<T>::get_first() const { return(*this)[0]; } template <class T> T Vector<T>::get_last() const { const size_t this_size = this->size(); return(*this)[this_size-1]; } template <class T> T Vector<T>::get_before_last() const { const size_t this_size = this->size(); return(*this)[this_size-2]; } template <class T> Vector<T> Vector<T>::delete_first(const size_t & elements_number) const { const size_t new_size = this->size() - elements_number; return get_last(new_size); } template <class T> Vector<T> Vector<T>::delete_last(const size_t & elements_number) const { const size_t new_size = this->size() - elements_number; return get_first(new_size); } /// Initializes all the elements of the vector with a given value. /// @param value Type value. template <class T> void Vector<T>::initialize(const T &value) { fill((*this).begin(),(*this).end(), value); } template <class T> void Vector<T>::initialize_first(const size_t& first, const T &value) { for(size_t i = 0; i < first; i++) (*this)[i] = value; } /// Initializes all the elements of the vector in a sequential order. 
template <class T> void Vector<T>::initialize_sequential() {
  for(size_t i = 0; i < this->size(); i++) {
    (*this)[i] = static_cast<T>(i);
  }
}

/// Assigns a random value comprised between a minimum value and a maximum value
/// to each element in
/// the vector.
/// @param minimum Minimum initialization value.
/// @param maximum Maximum initialization value.

template <class T>
void Vector<T>::randomize_uniform(const double &minimum,
                                  const double &maximum) {
  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  if(minimum > maximum) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "void randomize_uniform(const double&, const double&) method.\n"
           << "Minimum value must be less or equal than maximum value.\n";

    throw logic_error(buffer.str());
  }

#endif

  const size_t this_size = this->size();

  for(size_t i = 0; i < this_size; i++) {
    (*this)[i] = static_cast<T>(calculate_random_uniform(minimum, maximum));
  }
}

/// Assigns a random value comprised between given minimum and a maximum values
/// to every element in the
/// vector(element-wise bounds).
/// @param minimums Minimum initialization values.
/// @param maximums Maximum initialization values.
template <class T>
void Vector<T>::randomize_uniform(const Vector<double> &minimums,
                                  const Vector<double> &maximums) {
  const size_t this_size = this->size();

  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  const size_t minimums_size = minimums.size();
  const size_t maximums_size = maximums.size();

  if(minimums_size != this_size || maximums_size != this_size) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "void randomize_uniform(const Vector<double>&, const "
              "Vector<double>&) method.\n"
           << "Minimum and maximum sizes must be equal to vector size.\n";

    throw logic_error(buffer.str());
  }

  // Uses Vector::operator>, i.e. true only if every minimum exceeds its
  // corresponding maximum.
  if(minimums > maximums) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "void randomize_uniform(const Vector<double>&, const "
              "Vector<double>&) method.\n"
           << "Minimum values must be less or equal than their corresponding "
              "maximum values.\n";

    throw logic_error(buffer.str());
  }

#endif

  for(size_t i = 0; i < this_size; i++) {
    (*this)[i] = calculate_random_uniform(minimums[i], maximums[i]);
  }
}

/// Assigns random values to each element in the vector.
/// These are taken from a normal distribution with single mean and standard
/// deviation values for all the elements.
/// @param mean Mean value of uniform distribution.
/// @param standard_deviation Standard deviation value of uniform distribution.

template <class T>
void Vector<T>::randomize_normal(const double &mean,
                                 const double &standard_deviation) {
  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  if(standard_deviation < 0.0) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "void randomize_normal(const double&, const double&) method.\n"
           << "Standard deviation must be equal or greater than zero.\n";

    throw logic_error(buffer.str());
  }

#endif

  const size_t this_size = this->size();

  for(size_t i = 0; i < this_size; i++) {
    (*this)[i] = calculate_random_normal(mean, standard_deviation);
  }
}

/// Assigns random values to each element in the vector.
/// These are taken from normal distributions with given means and standard /// deviations for each element. /// @param mean Mean values of normal distributions. /// @param standard_deviation Standard deviation values of normal distributions. template <class T> void Vector<T>::randomize_normal(const Vector<double> &mean, const Vector<double> &standard_deviation) { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ const size_t mean_size = mean.size(); const size_t standard_deviation_size = standard_deviation.size(); if(mean_size != this_size || standard_deviation_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "void randomize_normal(const Vector<double>&, const " "Vector<double>&) method.\n" << "Mean and standard deviation sizes must be equal to vector size.\n"; throw logic_error(buffer.str()); } if(standard_deviation < 0.0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "void randomize_normal(const Vector<double>&, const " "Vector<double>&) method.\n" << "Standard deviations must be equal or greater than zero.\n"; throw logic_error(buffer.str()); } #endif for(size_t i = 0; i < this_size; i++) { (*this)[i] = calculate_random_normal(mean[i], standard_deviation[i]); } } template <class T> void Vector<T>::randomize_binary(const double& negatives_ratio, const double& positives_ratio) { const size_t this_size = this->size(); if(this_size == 0) { return; } const double total_ratio = negatives_ratio + positives_ratio; // Get number of instances for training, selection and testing const size_t positives_number = static_cast<size_t>(positives_ratio*this_size/total_ratio); const size_t negatives_number = this_size - positives_number; Vector<size_t> indices(0, 1, this_size-1); random_shuffle(indices.begin(), indices.end()); size_t i = 0; size_t index; // Positives size_t count_positives = 0; while(count_positives != positives_number) { index = indices[i]; 
(*this)[index] = 1; count_positives++; i++; } // Positives size_t count_negatives = 0; while(count_negatives != negatives_number) { index = indices[i]; (*this)[index] = 0; count_negatives++; i++; } } template <class T> void Vector<T>::map(Vector<T>& other_vector, const T& this_value, const T& other_value) { const size_t this_size = this->size(); size_t index = this_size; for(size_t i = 0; i < this_size; i++) { if((*this)[i] == this_value) { index = i; break; } } if(index != this_size) { other_vector[index] = other_value; } } template <class T> void Vector<T>::map(Vector<T>& other_vector_1, Vector<T>& other_vector_2, const T& this_value, const T& other_value_1, const T& other_value_2) { const size_t this_size = this->size(); size_t index = this_size; for(size_t i = 0; i < this_size; i++) { if((*this)[i] == this_value) { index = i; break; } } if(index != this_size) { other_vector_1[index] = other_value_1; other_vector_2[index] = other_value_2; } } /// Removes whitespaces from the start and the end of each element in this vector of strings. template <class T> void Vector<T>::trim() { const size_t this_size = this->size(); for(size_t i = 0; i < this_size; i++) { //prefixing spaces (*this)[i] = (*this)[i].erase(0,(*this)[i].find_first_not_of(' ')); //surfixing spaces (*this)[i] = (*this)[i].erase((*this)[i].find_last_not_of(' ') + 1); } } /// Returns a vector of strings that has whitespaces removed from the start and the end of each element. template <class T> Vector<T> Vector<T>::trimmed() const { Vector<T> new_vector(*this); new_vector.trim(); return(new_vector); } /// Returns true if the vector contains a certain value, and false otherwise. 
template <class T> bool Vector<T>::contains(const T &value) const {
  // Search this vector in place: a read-only lookup does not need the full
  // copy of *this that the previous implementation made on every call.
  return(find(this->begin(), this->end(), value) != this->end());
}

/// Returns true if at least one element of the vector is strictly greater
/// than a given value, and false otherwise.
/// @param value Comparison value.

template <class T> bool Vector<T>::contains_greater_than(const T &value) const {
  for(size_t i = 0; i < this->size(); i++) {
    if((*this)[i] > value) return true;
  }

  return false;
}

/// Returns true if the vector contains a certain value from a given set, and
/// false otherwise. An empty set yields false.
/// @param values Set of candidate values.

template <class T> bool Vector<T>::contains(const Vector<T> &values) const {
  if(values.empty()) {
    return(false);
  }

  const size_t values_size = values.size();

  for(size_t j = 0; j < values_size; j++) {
    // Search in place; no per-call copy of this vector is required.
    if(find(this->begin(), this->end(), values[j]) != this->end()) {
      return(true);
    }
  }

  return(false);
}

/// Returns true if the two vectors have the same size and every element of
/// this vector appears somewhere in the other one, and false otherwise.
/// Note: duplicates are not counted, so this is containment plus equal size,
/// not full multiset equality.
/// @param other_vector Vector to be compared with.

template <class T>
bool Vector<T>::has_same_elements(const Vector<T>& other_vector) const {
  const size_t this_size = this->size();

  if(this_size != other_vector.size()) {
    return false;
  }

  for(size_t i = 0; i < this_size; i++) {
    if(!other_vector.contains((*this)[i])) {
      return false;
    }
  }

  return true;
}

/// Returns true if the value of all the elements fall in some given range,
/// and false otherwise.
/// @param minimum Minimum value of the range(inclusive).
/// @param maximum Maximum value of the range(inclusive).

template <class T>
bool Vector<T>::is_in(const T &minimum, const T &maximum) const {
  const size_t this_size = this->size();

  for(size_t i = 0; i < this_size; i++) {
    if((*this)[i] < minimum ||(*this)[i] > maximum) {
      return(false);
    }
  }

  return(true);
}

/// Returns true if all the elements have the same value within a defined
/// tolerance, and false otherwise.
/// @param tolerance Tolerance value, so that if abs(max-min) <= tolerance,
/// then the vector is considered constant.
template <class T> bool Vector<T>::is_constant(const double &tolerance) const {
  const size_t this_size = this->size();

  // An empty vector is reported as not constant(existing convention).
  if(this_size == 0) {
    return(false);
  }

  const T minimum = calculate_minimum();
  const T maximum = calculate_maximum();

  // Constant when the whole value range fits inside the tolerance.
  if(fabs(maximum - minimum) <= tolerance) {
    return(true);
  } else {
    return(false);
  }
}

/// Returns true if all the elements in this vector of strings are equal, and false otherwise.
/// An empty vector is reported as not constant.

template <class T> bool Vector<T>::is_constant_string() const {
  const size_t this_size = this->size();

  if(this_size == 0) {
    return(false);
  }

  for(size_t i = 1; i < this_size; i++) {
    if((*this)[i] != (*this)[0]) {
      return(false);
    }
  }

  return(true);
}

/// Returns true if all the elements in the vector have values which strictly
/// increase with the index, and false otherwise.
/// Vectors with fewer than two elements are trivially crescent.

template <class T> bool Vector<T>::is_crescent() const {
  const size_t this_size = this->size();

  // Bug fix: this->size() - 1 wraps to SIZE_MAX for an empty vector(size_t
  // underflow), causing an out-of-bounds read in the old loop condition.
  if(this_size < 2) {
    return(true);
  }

  for(size_t i = 0; i < this_size - 1; i++) {
    if((*this)[i] >= (*this)[i + 1]) return(false);
  }

  return(true);
}

/// Returns true if all the elements in the vector have values which strictly
/// decrease with the index, and false otherwise.
/// Vectors with fewer than two elements are trivially decrescent.

template <class T> bool Vector<T>::is_decrescent() const {
  const size_t this_size = this->size();

  // Bug fix: same size_t underflow guard as in is_crescent().
  if(this_size < 2) {
    return(true);
  }

  for(size_t i = 0; i < this_size - 1; i++) {
    if((*this)[i] <= (*this)[i + 1]) return(false);
  }

  return(true);
}

/// Returns true if all the elements of this vector are equal or greater than zero, and false otherwise.
template <class T> bool Vector<T>::is_positive() const {
  for(size_t i = 0; i < this->size(); i++) {
    if((*this)[i] < 0.0) {
      return(false);
    }
  }

  return(true);
}

/// Returns true if all the elements of this vector are equal or less than zero, and false otherwise.

template <class T> bool Vector<T>::is_negative() const {
  for(size_t i = 0; i < this->size(); i++) {
    if((*this)[i] > 0.0) {
      return(false);
    }
  }

  return(true);
}

/// Returns true if consecutive elements differ exactly by the given period,
/// and false otherwise. On the first mismatch it prints diagnostic
/// information to stdout before returning.
/// NOTE(review): exact equality on what is presumably floating-point data —
/// confirm callers only use this on exactly-spaced series.
/// @param period Expected difference between consecutive elements.

template <class T> bool Vector<T>::check_period(const double& period) const {
  for(size_t i = 1; i < this->size(); i++) {
    if((*this)[i] != (*this)[i-1] + period) {
      cout << "i: " << i << endl;
      cout << (*this)[i] << endl;
      cout << (*this)[i-1] << endl;
      cout << "Period: " << (*this)[i] - (*this)[i-1] << endl;

      return false;
    }
  }

  return true;
}

/// Returns true if all the elements in the vector take at most two distinct
/// values, and false otherwise.
/// NOTE(review): reads (*this)[0] unconditionally — assumes a non-empty
/// vector; confirm against callers.

template <class T> bool Vector<T>::is_binary() const {
  const size_t this_size = this->size();

  // Seed the set of seen values with the first element.
  Vector<T> values(1,(*this)[0]);

  for(size_t i = 1; i < this_size; i++) {
    const bool contains_value = values.contains((*this)[i]);

    if(!contains_value && values.size() == 1) {
      // Second distinct value: still binary.
      values.push_back((*this)[i]);
    } else if(!contains_value) {
      // Third distinct value: not binary.
      return false;
    }
  }

  return true;
}

/// Returns true if all the non-missing elements in the vector are exactly 0
/// or 1, and false otherwise.
/// @param missing_indices Indices of the instances with missing values.

template <class T>
bool Vector<T>::is_binary(const Vector<size_t>& missing_indices) const {
  const size_t this_size = this->size();

  for(size_t i = 0; i < this_size; i++) {
    if(!missing_indices.contains(i)) {
      if((*this)[i] != 0 &&(*this)[i] != 1) {
        return false;
      }
    }
  }

  return true;
}

/// Returns true if all the elements in the vector are integers, and false otherwise.
/// An element is an integer when floor(x) == x.

template <class T> bool Vector<T>::is_integer() const {
  const size_t this_size = this->size();

  for(size_t i = 0; i < this_size; i++) {
    if(floor((*this)[i]) != (*this)[i]) {
      return false;
    }
  }

  return true;
}

/// Returns true if all the non-missing elements in the vector are integers, and false otherwise.
/// @param missing_indices Indices of the instances with missing values.

template <class T>
bool Vector<T>::is_integer(const Vector<size_t>& missing_indices) const {
  const size_t this_size = this->size();

  for(size_t i = 0; i < this_size; i++) {
    if(!missing_indices.contains(i)) {
      if(floor((*this)[i]) != (*this)[i]) {
        return false;
      }
    }
  }

  return true;
}

/// Returns true if the vector takes at most a given number of distinct
/// values, and false otherwise.
/// @param maximum Maximum number of distinct values allowed.

template <class T> bool Vector<T>::is_discrete(const size_t& maximum) const {
  Vector<T> values;

  for(size_t i = 0; i < this->size(); i++) {
    if(!values.contains((*this)[i])) {
      values.push_back((*this)[i]);

      if(values.size() > maximum) return false;
    }
  }

  return true;
}

/// Stub overload: always reports the vector as discrete.
/// NOTE(review): parameters are ignored — presumably unimplemented; confirm.

template <class T>
bool Vector<T>::is_discrete(const Vector<size_t>&, const size_t&) const {
  return true;
}

/// Returns true if the elements in the vector pass a Lilliefors normality
/// test at a given critical value(true = normality not rejected).
/// NOTE(review): the test body is compiled only when __Cpp11__ is NOT
/// defined; otherwise it always returns false.
/// @param critical_value Critical value to be used in the test.

template <class T>
bool Vector<T>::perform_Lilliefors_normality_test(const double& critical_value) const {
#ifndef __Cpp11__

  const size_t n = this->size();

  const double mean = this->calculate_mean();
  const double standard_deviation = this->calculate_standard_deviation();

  Vector<T> sorted_vector(*this);

  sort(sorted_vector.begin(), sorted_vector.end(), less<double>());

  double Fx;    // theoretical normal CDF at the current value
  double Snx;   // empirical CDF at the current value

  double D = -1;  // maximum discrepancy found so far

  for(size_t i = 0; i < n; i++) {
    Fx = 0.5 * erfc((mean -(*this)[i])/(standard_deviation*sqrt(2)));

    if((*this)[i] < sorted_vector[0]) {
      Snx = 0.0;
    } else if((*this)[i] >= sorted_vector[n-1]) {
      Snx = 1.0;
    } else {
      // Locate the sorted interval containing the value; its upper index
      // gives the empirical CDF step.
      for(size_t j = 0; j < n-1; j++) {
        if((*this)[i] >= sorted_vector[j] &&(*this)[i] < sorted_vector[j+1]) {
          Snx = static_cast<double>(j+1)/static_cast<double>(n);
        }
      }
    }

    if(D < abs(Fx - Snx)) {
      D = abs(Fx - Snx);
    }
  }

  // Normality is accepted when the maximum discrepancy stays below the
  // critical value.
  if(D < critical_value) {
    return true;
  } else {
    return false;
  }

#else
  return false;
#endif
}

/// Returns true if the elements in the vector have a normal distribution with a given set of critical values.
/// @param critical_values Critical values to be used in the test.
template <class T>
Vector<bool> Vector<T>::perform_Lilliefors_normality_test(const Vector<double>& critical_values) const {
#ifndef __Cpp11__

  const size_t n = this->size();

  const double mean = this->calculate_mean();
  const double standard_deviation = this->calculate_standard_deviation();

  Vector<T> sorted_vector(*this);

  sort(sorted_vector.begin(), sorted_vector.end(), less<double>());

  double Fx;        // theoretical normal CDF at the current value
  double Snx = 0.0; // empirical CDF; initialized defensively

  double D = -1;    // maximum discrepancy found so far

  for(size_t i = 0; i < n; i++) {
    Fx = 0.5 * erfc((mean -(*this)[i])/(standard_deviation*sqrt(2)));

    if((*this)[i] < sorted_vector[0]) {
      Snx = 0.0;
    } else if((*this)[i] >= sorted_vector[n-1]) {
      Snx = 1.0;
    } else {
      // Locate the sorted interval containing the value; its upper index
      // gives the empirical CDF step.
      for(size_t j = 0; j < n-1; j++) {
        if((*this)[i] >= sorted_vector[j] &&(*this)[i] < sorted_vector[j+1]) {
          Snx = static_cast<double>(j+1) / static_cast<double>(n);
        }
      }
    }

    if(D < abs(Fx - Snx)) {
      D = abs(Fx - Snx);
    }
  }

  // One pass/fail result per critical value: normality is accepted when the
  // maximum discrepancy stays below that critical value.
  Vector<bool> normality_test_results(critical_values.size());

  for(size_t i = 0; i < critical_values.size(); i++) {
    normality_test_results[i] = (D < critical_values[i]);
  }

  return normality_test_results;

#else
  // Bug fix: the previous code returned normality_test_results here, but that
  // variable is declared only in the #ifndef branch, so this did not compile
  // when __Cpp11__ was defined. Return an all-false result of matching size.
  return Vector<bool>(critical_values.size(), false);
#endif
}

/// Calculates the distance between the empirical distribution of the vector and the
/// normal distribution.
template <class T>
double Vector<T>::calculate_normal_distribution_distance() const {
  double normal_distribution_distance = 0.0;

  const size_t n = this->size();

  const double mean = this->calculate_mean();
  const double standard_deviation = this->calculate_standard_deviation();

  double normal_distribution; // Normal distribution
  double empirical_distribution; // Empirical distribution

  Vector<T> sorted_vector(*this);
  sort(sorted_vector.begin(), sorted_vector.end(), less<double>());

  size_t counter = 0;

  for(size_t i = 0; i < n; i++) {
    // Theoretical normal CDF at the i-th sorted value.
    normal_distribution =
        0.5 * erfc((mean - sorted_vector[i])/(standard_deviation*sqrt(2.0)));
    counter = 0;

    // Empirical CDF: fraction of sorted values not exceeding the current one.
    for(size_t j = 0; j < n; j++) {
      if(sorted_vector[j] <= sorted_vector[i]) {
        counter++;
      } else {
        break;
      }
    }

    empirical_distribution = static_cast<double>(counter)/static_cast<double>(n);

    // Accumulate the absolute CDF discrepancy over all points.
    normal_distribution_distance +=
        abs(normal_distribution - empirical_distribution);
  }

  return normal_distribution_distance;
}

/// Calculates the distance between the empirical distribution of the vector and the
/// half normal distribution.

template <class T>
double Vector<T>::calculate_half_normal_distribution_distance() const {
  double half_normal_distribution_distance = 0.0;

  const size_t n = this->size();

  const double standard_deviation = this->calculate_standard_deviation();

  double half_normal_distribution; // Half normal distribution
  double empirical_distribution; // Empirical distribution

  Vector<T> sorted_vector(*this);
  sort(sorted_vector.begin(), sorted_vector.end(), less<double>());

  size_t counter = 0;

  for(size_t i = 0; i < n; i++) {
    // Theoretical half-normal CDF at the i-th sorted value.
    half_normal_distribution =
        erf((sorted_vector[i])/(standard_deviation * sqrt(2)));
    counter = 0;

    // Empirical CDF as in calculate_normal_distribution_distance.
    for(size_t j = 0; j < n; j++) {
      if(sorted_vector[j] <= sorted_vector[i]) {
        counter++;
      } else {
        break;
      }
    }

    empirical_distribution = static_cast<double>(counter)/static_cast<double>(n);

    half_normal_distribution_distance +=
        abs(half_normal_distribution - empirical_distribution);
  }

  return half_normal_distribution_distance;
}

/// Calculates the distance between the empirical distribution of the vector and the
/// uniform distribution.

template <class T>
double Vector<T>::calculate_uniform_distribution_distance() const {
  double uniform_distribution_distance = 0.0;

  const size_t n = this->size();

  double uniform_distribution; // Uniform distribution
  double empirical_distribution; // Empirical distribution

  Vector<T> sorted_vector(*this);
  sort(sorted_vector.begin(), sorted_vector.end(), less<double>());

  const double minimum = sorted_vector[0];
  const double maximum = sorted_vector[n-1];

  size_t counter = 0;

  for(size_t i = 0; i < n; i++) {
    // Theoretical uniform CDF over [minimum, maximum].
    uniform_distribution = (sorted_vector[i]-minimum)/(maximum-minimum);
    counter = 0;

    for(size_t j = 0; j < n; j++) {
      if(sorted_vector[j] <= sorted_vector[i]) {
        counter++;
      } else {
        break;
      }
    }

    empirical_distribution = static_cast<double>(counter)/static_cast<double>(n);

    uniform_distribution_distance +=
        abs(uniform_distribution - empirical_distribution);
  }

  return uniform_distribution_distance;
}

/// Performs the Lilliefors normality tests varying the confidence level from 0.05 to 0.5.
/// It returns a vector containing the results of the tests.
template <class T> Vector<bool> Vector<T>::perform_normality_analysis() const { const size_t size = this->size(); double significance_level = 0.05; double A_significance_level; double B_significance_level; Vector<double> critical_values(9); for(size_t i = 0; i < 9; i++) { A_significance_level = 6.32207539843126 - 17.1398870006148*(1 - significance_level) + 38.42812675101057*pow((1 - significance_level),2) - 45.93241384693391*pow((1 - significance_level),3) + 7.88697700041829*pow((1 - significance_level),4) + 29.79317711037858*pow((1 - significance_level),5) - 18.48090137098585*pow((1 - significance_level),6); B_significance_level = 12.940399038404 - 53.458334259532*(1 - significance_level) + 186.923866119699*pow((1 - significance_level),2) - 410.582178349305*pow((1 - significance_level),3) + 517.377862566267*pow((1 - significance_level),4) - 343.581476222384*pow((1 - significance_level),5) + 92.123451358715*pow((1 - significance_level),6); critical_values[i] = sqrt(1/(A_significance_level*size+B_significance_level)); significance_level += 0.05; } return this->Lilliefors_normality_test(critical_values); } /// @todo template <class T> double Vector<T>::calculate_normality_parameter() const { const double maximum = this->calculate_maximum(); const double minimum = this->calculate_minimum(); const size_t n = this->size(); const double mean = this->calculate_mean(); const double standard_deviation = this->calculate_standard_deviation(); double normal_distribution; double empirical_distribution; double previous_normal_distribution; double previous_empirical_distribution; Vector<T> sorted_vector(*this); sort(sorted_vector.begin(), sorted_vector.end(), less<double>()); double empirical_area = 0.0; double normal_area = 0.0; size_t counter = 0; for(size_t i = 0; i < n; i++) { normal_distribution = 0.5 * erfc((mean - sorted_vector[i])/(standard_deviation*sqrt(2.0))); counter = 0; for(size_t j = 0; j < n; j++) { if(sorted_vector[j] <= sorted_vector[i]) { counter++; } else { 
break; } } empirical_distribution = static_cast<double>(counter)/static_cast<double>(n); if(i == 0) { previous_normal_distribution = normal_distribution; previous_empirical_distribution = empirical_distribution; } else { normal_area += 0.5*(sorted_vector[i]-sorted_vector[i-1])*(normal_distribution+previous_normal_distribution); empirical_area += 0.5*(sorted_vector[i]-sorted_vector[i-1])*(empirical_distribution+previous_empirical_distribution); previous_normal_distribution = normal_distribution; previous_empirical_distribution = empirical_distribution; } } const double uniform_area = (maximum-minimum)/2.0; return uniform_area; } template <class T> Vector<T> Vector<T>::calculate_variation_percentage() const { const size_t this_size = this->size(); Vector<T> new_vector(this_size, 0); for(size_t i = 1; i < this_size; i++) { if((*this)[i-1] != 0) { new_vector[i] = ((*this)[i] - (*this)[i-1])*100/(*this)[i-1]; } } return new_vector; } /// Calculates the distance between the empirical distribution of the vector and /// the normal, half-normal and uniform cumulative distribution. It returns 0, 1 /// or 2 if the closest distribution is the normal, half-normal or the uniform, /// respectively. 
template <class T>
size_t Vector<T>::perform_distribution_distance_analysis() const {
  // distances[0]=normal, distances[1]=half-normal, distances[2]=uniform.
  Vector<double> distances(3, 0.0);

  const size_t n = this->size();

  Vector<T> sorted_vector(*this);
  sort(sorted_vector.begin(), sorted_vector.end(), less<double>());

  const Statistics<T> statistics = this->calculate_statistics();

  const double mean = statistics.mean;
  const double standard_deviation = statistics.standard_deviation;
  const double minimum = sorted_vector[0];
  const double maximum = sorted_vector[n-1];

#pragma omp parallel for schedule(dynamic)
  for(int i = 0; i < static_cast<int>(n); i++) {
    // Theoretical CDF values at the i-th sorted element.
    const double normal_distribution =
        0.5 * erfc((mean - sorted_vector[i])/(standard_deviation*sqrt(2)));
    const double half_normal_distribution =
        erf((sorted_vector[i])/(standard_deviation * sqrt(2)));
    const double uniform_distribution =
        (sorted_vector[i]-minimum)/(maximum-minimum);

    double empirical_distribution;

    size_t counter = 0;

    // NOTE(review): this compares the UNSORTED element (*this)[i] against
    // sorted_vector while the theoretical CDFs above use sorted_vector[i];
    // the mix looks suspect — confirm intended before relying on results.
    if((*this)[i] < sorted_vector[0]) {
      empirical_distribution = 0.0;
    } else if((*this)[i] >= sorted_vector[n-1]) {
      empirical_distribution = 1.0;
    } else {
      counter = static_cast<size_t>(i + 1);

      for(int j = i+1; j < n; j++) {
        if(sorted_vector[j] <= sorted_vector[i]) {
          counter++;
        } else {
          break;
        }
      }

      empirical_distribution =
          static_cast<double>(counter)/static_cast<double>(n);
    }

    // Serialize the shared accumulation across OpenMP threads.
#pragma omp critical
    {
      distances[0] += abs(normal_distribution - empirical_distribution);
      distances[1] += abs(half_normal_distribution - empirical_distribution);
      distances[2] += abs(uniform_distribution - empirical_distribution);
    }
  }

  // Index of the smallest accumulated distance: 0, 1 or 2.
  return distances.calculate_minimal_index();
}

/// Calculates the distance between the empirical distribution of the vector and
/// the normal, half-normal and uniform cumulative distribution. It returns 0, 1
/// or 2 if the closest distribution is the normal, half-normal or the uniform,
/// respectively. Instances listed as missing are excluded.
template <class T>
size_t Vector<T>::perform_distribution_distance_analysis_missing_values(const Vector<size_t>& missing_indices) const {
  // distances[0]=normal, distances[1]=half-normal, distances[2]=uniform.
  Vector<double> distances(3, 0.0);

  double normal_distribution; // Normal distribution
  double half_normal_distribution; // Half-normal distribution
  double uniform_distribution; // Uniform distribution
  double empirical_distribution; // Empirical distribution

  // Work only on the values whose indices are not listed as missing.
  // NOTE(review): used_indices is built as(1, 1, size()) — presumably
  // 1-based on purpose to match get_difference/get_subvector; confirm.
  Vector<size_t> used_indices(1,1,this->size());
  used_indices = used_indices.get_difference(missing_indices);

  const Vector<T> used_values = this->get_subvector(used_indices);
  const size_t n = used_values.size();

  Vector<T> sorted_vector(used_values);
  sort(sorted_vector.begin(), sorted_vector.end(), less<double>());

  Statistics<T> statistics = used_values.calculate_statistics();

  const double mean = statistics.mean;
  const double standard_deviation = statistics.standard_deviation;
  const double minimum = sorted_vector[0];
  const double maximum = sorted_vector[n-1];

  // Degenerate data(constant or near-zero spread): report uniform(2).
  if(fabs(minimum - maximum) < numeric_limits<double>::epsilon()
     || standard_deviation < 1.0e-09) {
    return 2;
  }

  size_t counter = 0;

#pragma omp parallel for private(empirical_distribution, normal_distribution, half_normal_distribution, uniform_distribution, counter)
  for(int i = 0; i < n; i++) {
    // Theoretical CDF values at the i-th sorted element.
    normal_distribution =
        0.5 * erfc((mean - sorted_vector[i])/(standard_deviation*sqrt(2)));
    half_normal_distribution =
        erf((sorted_vector[i])/(standard_deviation * sqrt(2)));
    uniform_distribution = (sorted_vector[i]-minimum)/(maximum-minimum);
    counter = 0;

    // Empirical CDF: fraction of sorted values not exceeding the current one.
    for(size_t j = 0; j < n; j++) {
      if(sorted_vector[j] <= sorted_vector[i]) {
        counter++;
      } else {
        break;
      }
    }

    empirical_distribution = static_cast<double>(counter)/static_cast<double>(n);

    // Serialize the shared accumulation across OpenMP threads.
#pragma omp critical
    {
      distances[0] += abs(normal_distribution - empirical_distribution);
      distances[1] += abs(half_normal_distribution - empirical_distribution);
      distances[2] += abs(uniform_distribution - empirical_distribution);
    }
  }

  // Index of the smallest accumulated distance: 0, 1 or 2.
  return distances.calculate_minimal_index();
}

/// Returns the index of the closest element before the given position whose
/// value differs from the given value, or -1 if there is none.
/// @param index Starting position(exclusive).
/// @param value Value to be skipped over.

template <class T> int
Vector<T>::get_lower_index(const size_t& index, const T& value) const {
  if(index != 0) {
    for(int i = static_cast<int>(index)-1; i > -1; i--) {
      if((*this)[i] != value) {
        return static_cast<int>(i);
      }
    }
  }

  return(-1);
}

/// Returns the index of the closest element after the given position whose
/// value differs from the given value, or -1 if there is none.
/// @param index Starting position(exclusive).
/// @param value Value to be skipped over.

template <class T> int
Vector<T>::get_upper_index(const size_t& index, const T& value) const {
  const size_t this_size = this->size();

  if(index != this_size-1) {
    for(int i = static_cast<int>(index)+1; i < static_cast<int>(this_size); i++) {
      if((*this)[i] != value) {
        return static_cast<int>(i);
      }
    }
  }

  return(-1);
}

/// Returns a copy of this vector with the elements in reverse order.

template <class T>
Vector<T> Vector<T>::get_reverse() const {
  const size_t this_size = this->size();

  Vector<T> reverse(this_size);

  for(size_t i = 0; i < this_size; i++) {
    reverse[i] = (*this)[this_size - 1 - i];
  }

  return reverse;
}

/// Returns a copy of this time series in which every occurrence of the given
/// sentinel value is replaced by the mean of the nearest non-sentinel
/// neighbours(or by the single available neighbour at the edges).
/// @param value Sentinel value marking a missing observation.

template <class T>
Vector<T> Vector<T>::impute_time_series_missing_values_mean(const T& value) const {
  const size_t this_size = this->size();

  Vector<T> new_vector(*this);

  int lower_index;
  int upper_index;

  for(size_t i = 0; i < this_size; i++) {
    if((*this)[i] == value) {
      lower_index = get_lower_index(i, value);
      upper_index = get_upper_index(i, value);

      if(lower_index != -1 && upper_index != -1) {
        // Interior gap: average the two nearest valid neighbours.
        new_vector[i] = (new_vector[upper_index] + new_vector[lower_index])/2.0;
      } else if(lower_index != -1 && upper_index == -1) {
        // Trailing gap: carry the last valid value forward.
        new_vector[i] = new_vector[lower_index];
      } else if(lower_index == -1 && upper_index != -1) {
        // Leading gap: carry the first valid value backward.
        new_vector[i] = new_vector[upper_index];
      } else {
        // Whole vector is the sentinel value: nothing to impute.
        cout << "Error: impute_time_series_missing_values_mean" << endl;
      }
    }
  }

  return(new_vector);
}

/// Replaces a substring by another one in each element of this vector.
/// @param find_what String to be replaced.
/// @param replace_with String to be put instead.
template <class T>
void Vector<T>::replace_substring(const string& find_what, const string& replace_with) {
  const size_t size = this->size();

  for(size_t i = 0; i < size; i++) {
    size_t position = 0;

    // Advance past each replacement so overlapping matches inside the
    // replacement text are not re-processed.
    while((position = (*this)[i].find(find_what, position)) != string::npos) {
      (*this)[i].replace(position, find_what.length(), replace_with);

      position += replace_with.length();
    }
  }
}

/// Returns the number of times that a certain value is contained in the vector.
/// @param value Value to be counted.

template <class T>
size_t Vector<T>::count_equal_to(const T &value) const {
  return count(this->begin(), this->end(), value);
}

/// Returns the weighted count of a certain value: the sum of the weights of
/// the positions where the value occurs.
/// @param value Value to be counted.
/// @param weights Weight of each element(must be at least as long as this).

template <class T>
double Vector<T>::count_equal_to(const T &value, const Vector<double>& weights) const {
  double count = 0.0;

  for (size_t i = 0; i < this->size(); i++) {
    if ((*this)[i] == value) {
      count += weights[i];
    }
  }

  return count;
}

/// Returns the number of times that certain values are contained in the vector.
/// @param values Vector of values to be counted.

template <class T>
size_t Vector<T>::count_equal_to(const Vector<T> &values) const {
  // Sort a copy once so each value can be counted with a binary search.
  Vector<T> this_copy(*this);

  pair<typename vector<T>::iterator,typename vector<T>::iterator> bounds;

  sort(this_copy.begin(), this_copy.end());

  const size_t values_size = values.size();

  size_t count = 0;

#pragma omp parallel for private(bounds) reduction(+ : count)
  for(int i = 0; i < static_cast<int>(values_size); i++) {
    bounds = equal_range(this_copy.begin(), this_copy.end(), values[i]);

    // Width of the equal range = number of occurrences of values[i].
    count += ((bounds.second - this_copy.begin())
              - (bounds.first - this_copy.begin()));
  }

  return(count);
}

/// Returns the number of elements that are not equal to a certain value.
/// @param value Value.

template <class T>
size_t Vector<T>::count_not_equal_to(const T &value) const {
  const size_t this_size = this->size();

  size_t count = 0;

  for(size_t i = 0; i < this_size; i++) {
    if((*this)[i] != value) {
      count++;
    }
  }

  return(count);
}

/// Returns the number of elements that are not equal to certain values.
/// @param values Vector of values. template <class T> size_t Vector<T>::count_not_equal_to(const Vector<T> &values) const { const size_t this_size = this->size(); const size_t equal_to_count = count_equal_to(values); return(this_size - equal_to_count); } /// Returns the number of elements that are equal or greater than zero. template <class T> size_t Vector<T>::count_positive() const { const size_t this_size = this->size(); size_t count = 0; for(size_t i = 0; i < this_size; i++) { if((*this)[i] > 0) { count++; } } return(count); } template <class T> size_t Vector<T>::count_negative() const { const size_t this_size = this->size(); size_t count = 0; for(size_t i = 0; i < this_size; i++) { if((*this)[i] < 0) { count++; } } return(count); } //template <class T> //Vector<double> Vector<T>::get_binary_vector(const Vector<T>& unique_items) const //{ // const size_t unique_items_number = unique_items.size(); // Vector<double> binary_vector(unique_items_number); // for(size_t i = 0; i < unique_items_number; i++) // { // if(this->contains(unique_items[i])) // { // binary_vector[i] = 1.0; // } // else // { // binary_vector[i] = 0.0; // } // } // return(binary_vector); //} //template <class T> //Matrix<T> Vector<T>::get_binary_matrix(const char& separator) const //{ // const size_t this_size = this->size(); // const Vector<T> unique_mixes = get_unique_elements(); // Vector< Vector<T> > items(unique_mixes.size()); // Vector<T> unique_items; // for(int i = 0; i < unique_mixes.size(); i++) // { // items[i] = unique_mixes.split_element(i, separator); // unique_items = unique_items.assemble(items[i]).get_unique_elements(); // } // const size_t unique_items_number = unique_items.size(); // Matrix<T> binary_matrix(this_size, unique_items_number, 0.0); // Vector<string> elements; // Vector<double> binary_items(unique_items_number); // for(size_t i = 0; i < this_size; i++) // { // elements = split_element(i, separator); // binary_items = elements.get_binary_vector(unique_items); // 
binary_matrix.set_row(i, binary_items.to_string_vector()); // } // binary_matrix.set_header(unique_items); // return(binary_matrix); //} ///// Returns a binary matrix indicating the elements of the columns. //template <class T> //Matrix<T> Vector<T>::get_unique_binary_matrix(const char& separator, const Vector<T>& unique_items) const //{ // const size_t this_size = this->size(); // const size_t unique_items_number = unique_items.size(); // Matrix<T> binary_matrix(this_size, unique_items_number,0.0); // binary_matrix.set_header(unique_items.to_string_vector()); // Vector<string> elements; // Vector<double> binary_items(unique_items_number); // for(size_t i = 0; i < this_size; i++) // { // elements = split_element(i, separator); // binary_items = elements.get_binary_vector(unique_items); // binary_matrix.set_row(i, binary_items.to_string_vector()); // } // return(binary_matrix); //} template <class T> Vector<T> Vector<T>::filter_equal_to(const T& value) const { const size_t this_size = this->size(); const size_t new_size = count_equal_to(value); Vector<T> new_vector(new_size); size_t index = 0; for(size_t i = 0; i < this_size; i++) { if((*this)[i] == value) { new_vector[count] = (*this)[i]; index++; } } return(new_vector); } /// Returns the elements that are different to a given value. /// @param value Comparison value. 
template <class T>
Vector<T> Vector<T>::filter_not_equal_to(const T& value) const {
  const size_t this_size = this->size();

  const size_t new_size = count_not_equal_to(value);

  Vector<T> new_vector(new_size);

  size_t index = 0;

  for(size_t i = 0; i < this_size; i++) {
    if((*this)[i] != value) {
      new_vector[index] = (*this)[i];
      index++;
    }
  }

  return(new_vector);
}

/// Returns the elements of this vector that are equal to any of the given values.
/// @param values Comparison values.

template <class T>
Vector<T> Vector<T>::filter_equal_to(const Vector<T>& values) const {
  const Vector<size_t> indices = calculate_equal_to_indices(values);

  return get_subvector(indices);
}

/// Returns the elements of this vector that are different to all of the given values.
/// @param values Comparison values.

template <class T>
Vector<T> Vector<T>::filter_not_equal_to(const Vector<T>& values) const {
  const Vector<size_t> indices = calculate_not_equal_to_indices(values);

  return get_subvector(indices);
}

/// Returns a vector containing the strictly positive elements of this vector.

template <class T>
Vector<T> Vector<T>::get_positive_elements() const {
  const size_t this_size = this->size();
  const size_t new_size = count_positive();

  Vector<T> new_vector(new_size);

  size_t count = 0;

  for(size_t i = 0; i < this_size; i++) {
    // Bug fix: the filter previously used >= 0 while new_size comes from
    // count_positive(), which counts > 0 only; any zero element therefore
    // caused an out-of-bounds write. The filter now matches the count.
    if((*this)[i] > 0) {
      new_vector[count] = (*this)[i];
      count++;
    }
  }

  return(new_vector);
}

/// Returns the number of different integers in the vector or 0 if the number of different
/// integers in the vector is greater than a given number, or if there are numbers in the
/// vector which are not integers.
/// @param maximum_integers Maximum number of different integers to count.
template <class T>
size_t Vector<T>::count_integers(const size_t& maximum_integers) const {
  // Non-integer data disqualifies the whole vector.
  if(!this->is_integer()) {
    return 0;
  }

  const size_t this_size = this->size();

  Vector<T> integers;
  size_t integers_count = 0;

  for(size_t i = 0; i < this_size; i++) {
    if(!integers.contains((*this)[i])) {
      integers.push_back((*this)[i]);
      integers_count++;
    }

    // Too many distinct integers: treated as non-discrete, reported as 0.
    if(integers_count > maximum_integers) {
      return 0;
    }
  }

  return integers_count;
}

/// Returns the number of different integers among the non-missing elements,
/// or 0 if that number exceeds a given maximum or if there are non-integer
/// values in the vector.
/// @param missing_indices Indices of the instances with missing values.
/// @param maximum_integers Maximum number of different integers to count.

template <class T>
size_t Vector<T>::count_integers_missing_values(const Vector<size_t>& missing_indices,
                                                const size_t& maximum_integers) const {
  if(!this->is_integer(missing_indices)) {
    return 0;
  }

  const size_t this_size = this->size();

  Vector<T> integers;
  size_t integers_count = 0;

  for(size_t i = 0; i < this_size; i++) {
    if(!missing_indices.contains(i)) {
      if(!integers.contains((*this)[i])) {
        integers.push_back((*this)[i]);
        integers_count++;
      }

      if(integers_count > maximum_integers) {
        return 0;
      }
    }
  }

  return integers_count;
}

/// Returns the indices of the elements falling inside a closed range.
/// @param minimum Lower bound(inclusive).
/// @param maximum Upper bound(inclusive).

template <class T>
Vector<size_t> Vector<T>::calculate_between_indices(const T& minimum, const T& maximum) const {
  const size_t this_size = this->size();

  // Pre-size the result with the exact number of matches.
  const size_t new_size = count_between(minimum, maximum);

  Vector<size_t> indices(new_size);

  size_t index = 0;

  for(size_t i = 0; i < this_size; i++) {
    if((*this)[i] >= minimum &&(*this)[i] <= maximum) {
      indices[index] = i;

      index++;
    }
  }

  return indices;
}

/// Returns the vector indices at which the vector elements take some given value.
/// @param value Value.
template <class T>
Vector<size_t> Vector<T>::calculate_equal_to_indices(const T &value) const
{
    const size_t elements_number = this->size();

    const size_t matches_number = count_equal_to(value);

    // No matches: hand back an empty index vector straight away.
    if(matches_number == 0)
    {
        return Vector<size_t>();
    }

    Vector<size_t> matching_indices(matches_number);

    size_t next_position = 0;

    for(size_t element_index = 0; element_index < elements_number; element_index++)
    {
        if((*this)[element_index] == value)
        {
            matching_indices[next_position++] = element_index;
        }
    }

    return matching_indices;
}


/// Returns the indices of the elements that are equal to given values.
/// @param values Vector of values.

template <class T>
Vector<size_t> Vector<T>::calculate_equal_to_indices(const Vector<T>&values) const
{
    const size_t elements_number = this->size();

    const size_t matches_number = count_equal_to(values);

    // No matches: hand back an empty index vector straight away.
    if(matches_number == 0)
    {
        return Vector<size_t>();
    }

    Vector<size_t> matching_indices(matches_number);

    size_t next_position = 0;

    for(size_t element_index = 0; element_index < elements_number; element_index++)
    {
        if(values.contains((*this)[element_index]))
        {
            matching_indices[next_position++] = element_index;
        }
    }

    return matching_indices;
}


/// Returns the indices of the elements that are not equal to a given value.
/// @param value Element value.

template <class T>
Vector<size_t> Vector<T>::calculate_not_equal_to_indices(const T &value) const
{
    const size_t elements_number = this->size();

    // Sized up-front from a pre-count of the non-matching elements.
    Vector<size_t> differing_indices(count_not_equal_to(value));

    size_t next_position = 0;

    for(size_t element_index = 0; element_index < elements_number; element_index++)
    {
        if((*this)[element_index] != value)
        {
            differing_indices[next_position++] = element_index;
        }
    }

    return differing_indices;
}


/// Returns the indices of the elements that are not equal to given values.
/// @param values Vector of values.
template <class T>
Vector<size_t> Vector<T>::calculate_not_equal_to_indices(const Vector<T> &values) const
{
    const size_t this_size = this->size();

    // Pre-count so the result can be allocated once at its final size.
    const size_t occurrences_number = count_not_equal_to(values);

    if(occurrences_number == 0) return Vector<size_t>();

    Vector<size_t> occurrence_indices(occurrences_number);

    size_t index = 0;

    for(size_t i = 0; i < this_size; i++)
    {
        if(!values.contains((*this)[i]))
        {
            occurrence_indices[index] = i;
            index++;
        }
    }

    return(occurrence_indices);
}


/// Returns the indices of the elements which are equal to a given value.
/// @param value Value.

template <class T>
Vector<size_t> Vector<T>::get_indices_equal_to(const T &value) const
{
    const size_t this_size = this->size();

    const size_t equal_to_count = count_equal_to(value);

    Vector<size_t> equal_to_indices(equal_to_count);

    size_t index = 0;

    for(size_t i = 0; i < this_size; i++)
    {
        if((*this)[i] == value)
        {
            equal_to_indices[index] = i;
            index++;
        }
    }

    return(equal_to_indices);
}


/// Returns the indices of the elements which are less than a given value.
/// @param value Value.

template <class T>
Vector<size_t> Vector<T>::get_indices_less_than(const T &value) const
{
    const size_t this_size = this->size();

    const size_t less_than_count = count_less_than(value);

    Vector<size_t> less_than_indices(less_than_count);

    size_t index = 0;

    for(size_t i = 0; i < this_size; i++)
    {
        if((*this)[i] < value)
        {
            less_than_indices[index] = i;
            index++;
        }
    }

    return(less_than_indices);
}


/// Returns the indices of the elements which are greater than a given value.
/// @param value Value.

template <class T>
Vector<size_t> Vector<T>::get_indices_greater_than(const T &value) const
{
    const size_t this_size = this->size();

    const size_t count = count_greater_than(value);

    Vector<size_t> indices(count);

    size_t index = 0;

    for(size_t i = 0; i < this_size; i++)
    {
        if((*this)[i] > value)
        {
            indices[index] = i;
            index++;
        }
    }

    return(indices);
}


/// Returns the number of elements which are greater than some given value.
/// @param value Value.
template <class T>
size_t Vector<T>::count_greater_than(const T &value) const
{
    // Delegate the scan to count_if with a comparison predicate.
    return static_cast<size_t>(count_if(this->begin(), this->end(),
        [&value](const T& element) { return element > value; }));
}


/// Returns the number of elements which are less than some given value.
/// @param value Value.

template <class T>
size_t Vector<T>::count_less_than(const T &value) const
{
    return static_cast<size_t>(count_if(this->begin(), this->end(),
        [&value](const T& element) { return element < value; }));
}


/// Returns the number of elements which are greater than or equal to some given value.
/// @param value Value.

template <class T>
size_t Vector<T>::count_greater_equal_to(const T &value) const
{
    return static_cast<size_t>(count_if(this->begin(), this->end(),
        [&value](const T& element) { return element >= value; }));
}


/// Returns the number of elements which are less or equal than some given value.
/// @param value Value.

template <class T>
size_t Vector<T>::count_less_equal_to(const T &value) const
{
    const size_t elements_number = this->size();

    size_t total = 0;

    for(size_t i = 0; i < elements_number; i++)
    {
        if((*this)[i] <= value)
        {
            total++;
        }
    }

    return total;
}


/// Returns the number of elements which are equal or greater than a minimum value
/// and equal or less than a maximum value.
/// @param minimum Minimum value.
/// @param maximum Maximum value.

template <class T>
size_t Vector<T>::count_between(const T &minimum, const T &maximum) const
{
    return static_cast<size_t>(count_if(this->begin(), this->end(),
        [&minimum, &maximum](const T& element) { return element >= minimum && element <= maximum; }));
}


/// Returns the number of elements in this timestamp vector which correspond to a given year.
/// @param year Year.
template <class T> size_t Vector<T>::count_date_occurrences(const size_t& year) const { const size_t this_size = this->size(); time_t time; struct tm* date_info; size_t count = 0; for(size_t i = 0; i < this_size; i++) { time = (*this)[i]; date_info = gmtime(&time); if(date_info->tm_year+1900 == year) { count++; } } return(count); } /// Returns a matrix with the date occurrences in this vector of timestamps. /// Data goes from the first to the last date in this vector. /// The first column is the day of the month. /// The second column is the month. /// The third column is the year. /// The fourth column is the number of occurrences for that date. template <class T> Matrix<T> Vector<T>::count_daily_series_occurrences() const { const time_t start_date = calculate_minimum(); const time_t end_date = calculate_maximum(); const size_t day_seconds = 60*60*24; const size_t days_number = static_cast<size_t>(difftime(end_date, start_date))/day_seconds; Matrix<T> count(days_number, 4, 0); time_t date; struct tm* date_info; size_t day; size_t month; size_t year; for(size_t i = 0; i < days_number; i++) { date = start_date + i*day_seconds; date_info = gmtime(&date); day = static_cast<size_t>(date_info->tm_mday); month = static_cast<size_t>(date_info->tm_mon+1); year = static_cast<size_t>(date_info->tm_year+1900); count(i, 0) = day; count(i, 1) = month; count(i, 2) = year; } const size_t this_size = this->size(); size_t row_index; for(size_t i = 0; i < this_size; i++) { date = (*this)[i]; row_index = static_cast<size_t>(difftime(date, start_date))/day_seconds; count(row_index, 3)++; } return(count); } /// Returns a matrix with the weekly occurrences in this vector of timestamps. /// Data goes from the first to the last date in this vector. /// The first column is the week of the year. /// The second column is the year. /// The third column is the number of occurrences for that week. 
template <class T>
Matrix<T> Vector<T>::count_weekly_series_occurrences() const
{
    const time_t start_date = calculate_minimum();
    const time_t end_date = calculate_maximum();

    const size_t week_seconds = 60*60*24*7;

    // Bug fix: +1 so the week containing end_date gets its own row; without it
    // the accumulation loop below indexes one row past the end of the matrix
    // for timestamps falling in the last week.
    const size_t weeks_number = static_cast<size_t>(difftime(end_date, start_date))/week_seconds + 1;

    Matrix<T> count(weeks_number, 3, 0);

    time_t date;
    struct tm* date_info;

    size_t year;
    size_t week;

    char buffer[64];

    for(size_t i = 0; i < weeks_number; i++)
    {
        date = start_date + i*week_seconds;

        date_info = gmtime(&date);

        // %W formats the week of the year (Monday as first day of the week).
        if(strftime(buffer, sizeof buffer, "%W", date_info) != 0)
        {
            week = static_cast<size_t>(atoi(buffer));
        }
        else
        {
            cout << "Unknown week number" << endl;
        }

        year = static_cast<size_t>(date_info->tm_year+1900);

        count(i, 0) = week;
        count(i, 1) = year;
    }

    const size_t this_size = this->size();

    size_t row_index;

    // Rows are indexed by whole weeks elapsed since the first timestamp.
    for(size_t i = 0; i < this_size; i++)
    {
        date = (*this)[i];

        row_index = static_cast<size_t>(difftime(date, start_date))/week_seconds;

        count(row_index, 2)++;
    }

    return(count);
}


/// Returns a matrix with the month occurrences in this vector of timestamps.
/// Data goes from the first to the last date in this vector.
/// The first column is the month.
/// The second column is the year.
/// The third column is the number of occurrences for that month.
template <class T>
Matrix<T> Vector<T>::count_monthly_series_occurrences() const
{
    const time_t start_date = calculate_minimum();
    const time_t end_date = calculate_maximum();

    time_t time;
    struct tm* date_info;

    date_info = gmtime(&start_date);

    const int start_month = date_info->tm_mon+1;
    const int start_year = date_info->tm_year+1900;

    date_info = gmtime(&end_date);

    const int end_month = date_info->tm_mon+1;
    const int end_year = date_info->tm_year+1900;

    // One row per month in the closed range [start, end].
    const size_t months_number = static_cast<size_t>((end_year-start_year)*12 + end_month - start_month + 1);

    Matrix<T> count(months_number, 4, 0);

    size_t month;
    size_t year;
    size_t division;

    for(size_t i = 0; i < months_number; i++)
    {
        // Month

        month = static_cast<size_t>(start_month) + i;

        // Fold month values beyond December back into the 1..12 range.
        division = (month-1)/12;

        if(month > 12)
        {
            month = month -(12 * division);
        }

        count(i, 0) = month;

        // Year

        year = static_cast<size_t>(start_year) + (i + static_cast<size_t>(start_month) - 1)/12;

        count(i, 1) = year;
    }

    const size_t this_size = this->size();

    size_t row_index;

    for(size_t i = 0; i < this_size; i++)
    {
        time = (*this)[i];

        date_info = gmtime(&time);

        month = static_cast<size_t>(date_info->tm_mon+1);
        year = static_cast<size_t>(date_info->tm_year+1900);

        // Rows are indexed by months elapsed since the starting month.
        row_index = (year-static_cast<size_t>(start_year))*12 + month - static_cast<size_t>(start_month);

        count(row_index, 2)++;
    }

    // Fourth column: share of the total number of timestamps, as a percentage.
    for(size_t i = 0; i < months_number; i++)
    {
        count(i, 3) = count(i, 2)*100.0/this_size;
    }

    return(count);
}


/// Returns a matrix with the yearly occurrences in this vector of timestamps.
/// Data goes from the first to the last date in this vector.
/// The first column is the year.
/// The second column is the number of occurrences for that year.
template <class T>
Matrix<T> Vector<T>::count_yearly_series_occurrences() const
{
    const time_t start_date = calculate_minimum();
    const time_t end_date = calculate_maximum();

    struct tm* date_info;

    date_info = gmtime(&start_date);

    const int start_year = date_info->tm_year+1900;

    date_info = gmtime(&end_date);

    const int end_year = date_info->tm_year+1900;

    // One row per calendar year in the closed range [start_year, end_year].
    const size_t years_number = static_cast<size_t>(end_year-start_year+1);

    Matrix<T> yearly_orders(years_number, 2);

    for(size_t i = 0; i < years_number; i++)
    {
        const size_t year = static_cast<size_t>(start_year) + i;

        const size_t orders_number = count_date_occurrences(year);

        yearly_orders(i, 0) = year;
        yearly_orders(i, 1) = orders_number;
    }

    return(yearly_orders);
}


/// Returns a matrix with the monthly occurrences in this vector of timestamps, between a start date and an end date.
/// The first column is the month.
/// The second column is the year.
/// The third column is the number of occurrences for that month.
/// @param start_month Start month.
/// @param start_year Start year.
/// @param end_month End month.
/// @param end_year End year.
template <class T>
Matrix<T> Vector<T>::count_monthly_series_occurrences(const size_t& start_month, const size_t& start_year, const size_t& end_month, const size_t& end_year) const
{
    time_t time;
    struct tm* date_info;

    // One row per month in the closed range [start, end].
    const size_t months_number = (end_year-start_year)*12 + end_month - start_month + 1;

    Matrix<T> count(months_number, 3, 0);

    size_t month;
    size_t year;
    size_t division;

    for(size_t i = 0; i < months_number; i++)
    {
        // Month

        month = start_month + i;

        // Fold month values beyond December back into the 1..12 range.
        division = (month-1)/12;

        if(month > 12)
        {
            month = month -(12 * division);
        }

        count(i, 0) = month;

        // Year

        year = start_year + (i+start_month-1)/12;

        count(i, 1) = year;
    }

    const size_t this_size = this->size();

    size_t row_index;

    for(size_t i = 0; i < this_size; i++)
    {
        time = (*this)[i];

        date_info = gmtime(&time);

        month = static_cast<size_t>(date_info->tm_mon+1);
        year = static_cast<size_t>(date_info->tm_year+1900);

        // NOTE(review): timestamps outside [start, end] would produce an
        // out-of-range row_index here -- callers must pass a covering range.
        row_index = (year-start_year)*12 + month - start_month;

        count(row_index, 2)++;
    }

    return(count);
}


/// Performs the monthly analysis per year with the input vector.
/// It returns a matrix containing 12 rows and the number of columns equal to years number.
template <class T>
Matrix<T> Vector<T>::count_monthly_occurrences() const
{
    const time_t start_date = calculate_minimum();
    const time_t end_date = calculate_maximum();

    struct tm* date_info;

    date_info = gmtime(&start_date);

    const int start_year = date_info->tm_year+1900;

    date_info = gmtime(&end_date);

    const int end_year = date_info->tm_year+1900;

    const size_t months_number = 12;
    const size_t years_number = static_cast<size_t>(end_year - start_year + 1);

    // First column holds the month number; one further column per year.
    Matrix<T> count(months_number, years_number+1, 0);

    for(size_t i = 0; i < months_number; i++)
    {
        size_t month = i + 1;

        count(i, 0) = static_cast<double>(month);

        for(size_t j = 0; j < years_number; j++)
        {
            size_t year = static_cast<size_t>(start_year) + j;

            const size_t orders_number = count_date_occurrences(month, year);

            count(i,j+1) = static_cast<double>(orders_number);
        }
    }

    return(count);
}


/// Returns the number of elements in this timestamp vector which correspond to a given month.
/// @param month Month.

template <class T>
size_t Vector<T>::count_month_occurrences(const size_t& month) const
{
    const size_t this_size = this->size();

    time_t time;
    struct tm* date_info;

    size_t count = 0;

    for(size_t i = 0; i < this_size; i++)
    {
        time = (*this)[i];

        // gmtime() interprets the timestamp as UTC.
        date_info = gmtime(&time);

        if(date_info->tm_mon+1 == month)
        {
            count++;
        }
    }

    return(count);
}


/// Returns the number of elements in this timestamp vector which correspond to a given date.
/// @param month Month.
/// @param year Year.

template <class T>
size_t Vector<T>::count_date_occurrences(const size_t& month, const size_t& year) const
{
    const size_t this_size = this->size();

    time_t time;
    struct tm* date_info;

    size_t count = 0;

    for(size_t i = 0; i < this_size; i++)
    {
        time = (*this)[i];

        date_info = gmtime(&time);

        if(date_info->tm_mon+1 == month && date_info->tm_year+1900 == year)
        {
            count++;
        }
    }

    return(count);
}


/// Returns the number of elements in this string vector which contains a given substring.
/// @param find_what Substring to be found.
template <class T>
size_t Vector<T>::count_contains(const string& find_what) const
{
    const size_t this_size = this->size();

    size_t count = 0;

    for(size_t i = 0; i < this_size; i++)
    {
        // Elements are strings; count those that contain find_what as a substring.
        if((*this)[i].find(find_what) != string::npos)
        {
            count++;
        }
    }

    return(count);
}


/// Appends a different string onto the end of each string in this vector.
/// @param other_vector Vector of strings to be appended.
/// @param separator Delimiter char between both strings.

template <class T>
Vector<T> Vector<T>::merge(const Vector<T>& other_vector, const char& separator) const
{
    const size_t this_size = this->size();

    Vector<T> merged(this_size);

    for(size_t i = 0; i < this_size; i++)
    {
        // NOTE(review): assumes other_vector has at least this_size elements
        // -- verify at call sites.
        merged[i] = (*this)[i] + separator + other_vector[i];
    }

    return(merged);
}


/// Returns the elements that are equal or greater than a minimum value
/// and less or equal to a maximum value.
/// @param minimum Minimum value.
/// @param maximum Maximum value.

template <class T>
Vector<T> Vector<T>::filter_minimum_maximum(const T &minimum, const T &maximum) const
{
    const size_t this_size = this->size();

    // Pre-count so the result can be allocated once at its final size.
    const size_t new_size = count_between(minimum, maximum);

    Vector<T> new_vector(new_size);

    size_t count = 0;

    for(size_t i = 0; i < this_size; i++)
    {
        if((*this)[i] >= minimum && (*this)[i] <= maximum)
        {
            new_vector[count] = (*this)[i];
            count++;
        }
    }

    return(new_vector);
}


/// Returns the vector indices of the elements that contain the given value.
/// @param find_what String to find in the vector.

template <class T>
Vector<size_t> Vector<T>::calculate_contains_indices(const string& find_what) const
{
    const size_t this_size = this->size();

    Vector<size_t> indices;

    for(size_t i = 0; i < this_size; i++)
    {
        if((*this)[i].find(find_what) != string::npos)
        {
            indices.push_back(i);
        }
    }

    return(indices);
}


/// Returns the vector indices at which the vector elements are less than some
/// given value.
/// @param value Value.
template <class T>
Vector<size_t> Vector<T>::calculate_less_than_indices(const T &value) const
{
    Vector<size_t> result;

    const size_t elements_number = this->size();

    // Collect the positions of all elements strictly below value.
    for(size_t position = 0; position < elements_number; position++)
    {
        if((*this)[position] < value)
        {
            result.push_back(position);
        }
    }

    return result;
}


/// Returns the vector indices at which the vector elements are greater than
/// some given value.
/// @param value Value.

template <class T>
Vector<size_t> Vector<T>::calculate_greater_than_indices(const T &value) const
{
    Vector<size_t> result;

    const size_t elements_number = this->size();

    // Collect the positions of all elements strictly above value.
    for(size_t position = 0; position < elements_number; position++)
    {
        if((*this)[position] > value)
        {
            result.push_back(position);
        }
    }

    return result;
}


/// Returns the indices of the elements which are less or equal to a given value.
/// @param value Comparison value.

template <class T>
Vector<size_t> Vector<T>::calculate_less_equal_to_indices(const T &value) const
{
    Vector<size_t> result;

    const size_t elements_number = this->size();

    for(size_t position = 0; position < elements_number; position++)
    {
        if((*this)[position] <= value)
        {
            result.push_back(position);
        }
    }

    return result;
}


/// Returns the indices of the elements which are greater or equal to a given value.
/// @param value Comparison value.

template <class T>
Vector<size_t> Vector<T>::calculate_greater_equal_to_indices(const T &value) const
{
    Vector<size_t> result;

    const size_t elements_number = this->size();

    for(size_t position = 0; position < elements_number; position++)
    {
        if((*this)[position] >= value)
        {
            result.push_back(position);
        }
    }

    return result;
}


/// Returns a vector containing the sum of the frequencies of the bins to which
/// this vector belongs.
/// @param histograms Used histograms.
template <class T>
Vector<size_t> Vector<T>::calculate_total_frequencies(
    const Vector< Histogram<T> > &histograms) const
{
    const size_t histograms_number = histograms.size();

    Vector<size_t> total_frequencies(histograms_number);

    // Element i is looked up in histogram i.
    for(size_t i = 0; i < histograms_number; i++)
    {
        total_frequencies[i] = histograms[i].calculate_frequency((*this)[i]);
    }

    return(total_frequencies);
}


/// Returns a vector containing the sum of the frequencies of the bins to which
/// this vector belongs.
/// @param instance_missing_values Missing values
/// @param histograms Used histograms

template <class T>
Vector<size_t> Vector<T>::calculate_total_frequencies_missing_values(
    const Vector<size_t>& instance_missing_values,
    const Vector< Histogram<T> >& histograms) const
{
    const size_t histograms_number = histograms.size();

    // Bug fix: the result must be allocated before the indexed assignments
    // below; the previous code indexed a default-constructed (empty) vector,
    // which is undefined behavior.
    Vector<size_t> total_frequencies(histograms_number);

    for(size_t i = 0; i < histograms_number; i++)
    {
        if(!(instance_missing_values.contains(i)))
        {
            total_frequencies[i] = histograms[i].calculate_frequency((*this)[i]);
        }
        else
        {
            // Missing positions contribute a frequency of zero.
            total_frequencies[i] = 0;
        }
    }

    return(total_frequencies);
}


/// Returns vector with the Box-Cox transformation.
/// @param lambda Exponent of the Box-Cox transformation.

template <class T>
Vector<double> Vector<T>::perform_Box_Cox_transformation(const double& lambda) const
{
    const size_t size = this->size();

    Vector<double> vector_tranformation(size);

    for(size_t i = 0; i < size; i++)
    {
        // lambda == 0 is the logarithmic limit of the Box-Cox family.
        if(fabs(lambda) < numeric_limits<double>::epsilon())
        {
            vector_tranformation[i] = log(static_cast<double>((*this)[i]));
        }
        else
        {
            vector_tranformation[i] = (pow(static_cast<double>((*this)[i]), lambda) - 1)/lambda;
        }
    }

    return vector_tranformation;
}


/// Returns a vector containing the relative frequencies of the elements.
/// @param total_sum Sum of the elements of the vector template <class T> Vector<double> Vector<T>::calculate_percentage(const size_t& total_sum) const { const size_t this_size = this->size(); Vector<double> percentage_vector(this_size); for(size_t i = 0; i < this_size; i++) { percentage_vector[i] = static_cast<double>((*this)[i])/static_cast<double>(total_sum*100.0); } return percentage_vector; } template <class T> double Vector<T>::calculate_error(const Vector<T>& other_vector) const { const size_t this_size = this->size(); Vector<double> error(this_size,0); for(size_t i = 0; i < this_size; i++) { if(other_vector[i] != 0) { error[i] = static_cast<double>(abs((*this)[i] - other_vector[i])); } } error = error.filter_not_equal_to(0); double error_mean = error.calculate_mean(); return error_mean; } /// Returns the smallest element in the vector. template <class T> T Vector<T>::calculate_minimum() const { Vector<T> copy(*this); typename vector<T>::iterator result = min_element(copy.begin(), copy.end()); return(*result); } /// Returns the largest element in the vector. template <class T> T Vector<T>::calculate_maximum() const { Vector<T> copy(*this); typename vector<T>::iterator result = max_element(copy.begin(), copy.end()); return(*result); } /// Returns a vector containing the smallest and the largest elements in the /// vector. template <class T> Vector<T> Vector<T>::calculate_minimum_maximum() const { Vector<T> copy(*this); typename vector<T>::iterator minimum = min_element(copy.begin(), copy.end()); typename vector<T>::iterator maximum = max_element(copy.begin(), copy.end()); return {*minimum, *maximum}; } /// Returns the smallest element in the vector. 
template <class T> T Vector<T>::calculate_minimum_missing_values( const Vector<size_t> &missing_indices) const { const size_t this_size = this->size(); T minimum = numeric_limits<T>::max(); for(size_t i = 0; i < this_size; i++) { if((*this)[i] < minimum && !missing_indices.contains(i)) { minimum = (*this)[i]; } } return(minimum); } /// Returns the largest element in the vector. template <class T> T Vector<T>::calculate_maximum_missing_values( const Vector<size_t> &missing_indices) const { const size_t this_size = this->size(); T maximum; if(numeric_limits<T>::is_signed) { maximum = -numeric_limits<T>::max(); } else { maximum = 0; } for(size_t i = 0; i < this_size; i++) { if((*this)[i] > maximum && !missing_indices.contains(i)) { maximum = (*this)[i]; } } return(maximum); } /// Returns a vector containing the smallest and the largest elements in the /// vector. template <class T> Vector<T> Vector<T>::calculate_minimum_maximum_missing_values( const Vector<size_t> &missing_indices) const { size_t this_size = this->size(); T minimum = numeric_limits<T>::max(); T maximum; if(numeric_limits<T>::is_signed) { maximum = -numeric_limits<T>::max(); } else { maximum = 0; } for(size_t i = 0; i < this_size; i++) { if(!missing_indices.contains(i)) { if((*this)[i] < minimum) { minimum = (*this)[i]; } if((*this)[i] > maximum) { maximum = (*this)[i]; } } } return {minimum, maximum}; } /// Calculates the explained variance for a given vector(principal components analysis). /// This method returns a vector whose size is the same as the size of the given vector. 
template<class T> Vector<T> Vector<T>::calculate_explained_variance() const { const size_t this_size = this->size(); #ifdef __OPENNN_DEBUG__ if(this_size == 0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "Vector<T> calculate_explained_variance() const method.\n" << "Size of the vector must be greater than zero.\n"; throw logic_error(buffer.str()); } #endif const double this_sum = this->calculate_absolute_value().calculate_sum(); #ifdef __OPENNN_DEBUG__ if(this_sum == 0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "Vector<T> calculate_explained_variance() const method.\n" << "Sum of the members of the vector must be greater than zero.\n"; throw logic_error(buffer.str()); } #endif #ifdef __OPENNN_DEBUG__ if(this_sum < 0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "Vector<T> calculate_explained_variance() const method.\n" << "Sum of the members of the vector cannot be negative.\n"; throw logic_error(buffer.str()); } #endif Vector<double> explained_variance(this_size); for(size_t i = 0; i < this_size; i++) { explained_variance[i] = ((*this)[i]/this_sum)*100.0; if(explained_variance[i] - 0.0 < 1.0e-16) { explained_variance[i] = 0.0; } } #ifdef __OPENNN_DEBUG__ if(explained_variance.calculate_sum() != 1.0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "Vector<T> calculate_explained_variance() const method.\n" << "Sum of explained variance must be 1.\n"; throw logic_error(buffer.str()); } #endif return explained_variance; } /// This method bins the elements of the vector into a given number of equally /// spaced containers. /// It returns a vector of two vectors. /// The size of both subvectors is the number of bins. /// The first subvector contains the frequency of the bins. /// The second subvector contains the center of the bins. 
template <class T>
Histogram<T> Vector<T>::calculate_histogram(const size_t &bins_number) const
{
    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    if(bins_number < 1)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: Vector Template.\n"
               << "Histogram<T> calculate_histogram(const size_t&) const method.\n"
               << "Number of bins is less than one.\n";

        throw logic_error(buffer.str());
    }

#endif

    Vector<T> minimums(bins_number);
    Vector<T> maximums(bins_number);

    Vector<T> centers(bins_number);
    Vector<size_t> frequencies(bins_number, 0);

    const Vector<T> minimum_maximum = calculate_minimum_maximum();

    const T minimum = minimum_maximum[0];
    const T maximum = minimum_maximum[1];

    // Uniform bin width over the full data range.
    const double length = (maximum - minimum) /static_cast<double>(bins_number);

    minimums[0] = minimum;
    maximums[0] = minimum + length;
    centers[0] = (maximums[0] + minimums[0]) / 2.0;

    // Calculate bins center

    for(size_t i = 1; i < bins_number; i++)
    {
        minimums[i] = minimums[i - 1] + length;
        maximums[i] = maximums[i - 1] + length;

        centers[i] = (maximums[i] + minimums[i]) / 2.0;
    }

    // Calculate bins frequency

    const size_t this_size = this->size();

    for(size_t i = 0; i < this_size; i++)
    {
        // Bins are half-open [min, max); the last bin also absorbs the maximum.
        for(size_t j = 0; j < bins_number - 1; j++)
        {
            if((*this)[i] >= minimums[j] &&(*this)[i] < maximums[j])
            {
                frequencies[j]++;
            }
        }

        if((*this)[i] >= minimums[bins_number - 1])
        {
            frequencies[bins_number - 1]++;
        }
    }

    Histogram<T> histogram(bins_number);
    histogram.centers = centers;
    histogram.minimums = minimums;
    histogram.maximums = maximums;
    histogram.frequencies = frequencies;

    return(histogram);
}


/// This method bins the elements of the vector into a given number of equally
/// spaced containers.
/// It returns a vector of two vectors.
/// The size of both subvectors is the number of bins.
/// The first subvector contains the frequency of the bins.
/// The second subvector contains the center of the bins.
template <class T>
Histogram<T> Vector<T>::calculate_histogram_binary() const
{
    // Control sentence(if debug)

    Vector<T> minimums(2);
    Vector<T> maximums(2);
    Vector<T> centers(2);
    Vector<size_t> frequencies(2, 0);

    // Both bins are degenerate points: {0} and {1}.
    for(size_t bin = 0; bin < 2; bin++)
    {
        minimums[bin] = static_cast<T>(bin);
        maximums[bin] = static_cast<T>(bin);
        centers[bin] = static_cast<T>(bin);
    }

    // Count the elements equal to 0 into bin 0 and the elements equal to 1
    // into bin 1; any other value is ignored.

    const size_t elements_number = this->size();

    for(size_t element = 0; element < elements_number; element++)
    {
        if((*this)[element] == minimums[0])
        {
            frequencies[0]++;
        }
        else if((*this)[element] == minimums[1])
        {
            frequencies[1]++;
        }
    }

    Histogram<T> histogram(2);
    histogram.centers = centers;
    histogram.minimums = minimums;
    histogram.maximums = maximums;
    histogram.frequencies = frequencies;

    return histogram;
}


/// This method bins the elements of the vector into a given number of equally
/// spaced containers.
/// It returns a vector of two vectors.
/// The size of both subvectors is the number of bins.
/// The first subvector contains the frequency of the bins.
/// The second subvector contains the center of the bins.
template <class T>
Histogram<T> Vector<T>::calculate_histogram_integers(const size_t& bins_number) const
{
    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    if(bins_number < 1)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: Vector Template.\n"
               << "Histogram<T> calculate_histogram(const size_t&) const method.\n"
               << "Number of bins is less than one.\n";

        throw logic_error(buffer.str());
    }

#endif

    // One bin per distinct integer value (up to bins_number of them).
    Vector<T> centers = this->get_integer_elements(bins_number);
    const size_t centers_number = centers.size();

    sort(centers.begin(), centers.end(), less<T>());

    Vector<T> minimums(centers_number);
    Vector<T> maximums(centers_number);
    Vector<size_t> frequencies(centers_number);

    for(size_t i = 0; i < centers_number; i++)
    {
        // Each bin is the single point {centers[i]}.
        minimums[i] = centers[i];
        maximums[i] = centers[i];

        frequencies[i] = this->count_equal_to(centers[i]);
    }

    Histogram<T> histogram(centers_number);
    histogram.centers = centers;
    histogram.minimums = minimums;
    histogram.maximums = maximums;
    histogram.frequencies = frequencies;

    return histogram;
}


/// This method bins the elements of the vector into a given number of equally
/// spaced containers.
/// It returns a vector of two vectors.
/// The size of both subvectors is the number of bins.
/// The first subvector contains the frequency of the bins.
/// The second subvector contains the center of the bins.
template <class T>
Histogram<T> Vector<T>::calculate_histogram_missing_values(
    const Vector<size_t> &missing_indices, const size_t &bins_number) const
{
    // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

    if(bins_number < 1)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: Vector Template.\n"
               << "Histogram<T> calculate_histogram_missing_values(const "
                  "Vector<size_t>&, const size_t&) const method.\n"
               << "Number of bins is less than one.\n";

        throw logic_error(buffer.str());
    }

#endif

    Vector<T> minimums(bins_number);
    Vector<T> maximums(bins_number);

    Vector<T> centers(bins_number);
    Vector<size_t> frequencies(bins_number, 0);

    // Range statistics are computed over the non-missing elements only.
    const Vector<T> minimum_maximum = calculate_minimum_maximum_missing_values(missing_indices);

    const T minimum = minimum_maximum[0];
    const T maximum = minimum_maximum[1];

    // Uniform bin width over the (non-missing) data range.
    const double length = (maximum - minimum) /static_cast<double>(bins_number);

    minimums[0] = minimum;
    maximums[0] = minimum + length;
    centers[0] = (maximums[0] + minimums[0]) / 2.0;

    // Calculate bins center

    for(size_t i = 1; i < bins_number; i++)
    {
        minimums[i] = minimums[i - 1] + length;
        maximums[i] = maximums[i - 1] + length;

        centers[i] = (maximums[i] + minimums[i]) / 2.0;
    }

    // Calculate bins frequency

    const size_t this_size = this->size();

    // NOTE(review): signed loop counters with casts -- presumably kept for
    // OpenMP compatibility; left as-is.
    for(int i = 0; i < static_cast<int>(this_size); i++)
    {
        if(!missing_indices.contains(static_cast<size_t>(i)))
        {
            // Bins are half-open [min, max); the last bin also absorbs the maximum.
            for(int j = 0; j < static_cast<int>(bins_number) - 1; j++)
            {
                if((*this)[i] >= minimums[j] &&(*this)[i] < maximums[j])
                {
                    frequencies[static_cast<size_t>(j)]++;
                }
            }

            if((*this)[i] >= minimums[bins_number - 1])
            {
                frequencies[bins_number - 1]++;
            }
        }
    }

    Histogram<T> histogram(bins_number);
    histogram.centers = centers;
    histogram.minimums = minimums;
    histogram.maximums = maximums;
    histogram.frequencies = frequencies;

    return(histogram);
}


/// This method bins the elements of the vector into a given number of equally
/// spaced containers.
/// It returns a vector of two vectors.
/// The size of both subvectors is the number of bins.
/// The first subvector contains the frequency of the bins. /// The second subvector contains the center of the bins. /// @param missing_indices Indices of the instances with missing values. template <class T> Histogram<T> Vector<T>::calculate_histogram_binary_missing_values(const Vector<size_t>& missing_indices) const { // Control sentence(if debug) Vector<T> minimums(2); Vector<T> maximums(2); Vector<T> centers(2); Vector<size_t> frequencies(2, 0); minimums[0] = 0.0; maximums[0] = 0.0; centers[0] = 0.0; minimums[1] = 1.0; maximums[1] = 1.0; centers[1] = 1.0; // Calculate bins frequency const size_t this_size = this->size(); for(size_t i = 0; i < this_size; i++) { if(!missing_indices.contains(i)) { for(size_t j = 0; j < 2; j++) { if((*this)[i] == minimums[j]) { frequencies[j]++; } } } } Histogram<T> histogram(2); histogram.centers = centers; histogram.minimums = minimums; histogram.maximums = maximums; histogram.frequencies = frequencies; return(histogram); } /// This method bins the elements of the vector into a given number of equally /// spaced containers. /// It returns a vector of two vectors. /// The size of both subvectors is the number of bins. /// The first subvector contains the frequency of the bins. /// The second subvector contains the center of the bins. /// @param missing_indices Indices of the instances with missing values. /// @param bins_number Number of bins of the histogram. 
template <class T> Histogram<T> Vector<T>::calculate_histogram_integers_missing_values(const Vector<size_t>& missing_indices, const size_t& bins_number) const { // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ if(bins_number < 1) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "Histogram<T> calculate_histogram(const size_t&) const method.\n" << "Number of bins is less than one.\n"; throw logic_error(buffer.str()); } #endif Vector<T> centers = this->get_integer_elements_missing_values(missing_indices, bins_number); const size_t centers_number = centers.size(); sort(centers.begin(), centers.end(), less<T>()); Vector<T> minimums(centers_number); Vector<T> maximums(centers_number); Vector<size_t> frequencies(centers_number); for(size_t i = 0; i < centers_number; i++) { minimums[i] = centers[i]; maximums[i] = centers[i]; frequencies[i] = this->count_equal_to(centers[i]); } Histogram<T> histogram(centers_number); histogram.centers = centers; histogram.minimums = minimums; histogram.maximums = maximums; histogram.frequencies = frequencies; return histogram; } /// Finds the first element in the vector with a given value, and returns its index. /// @param value Value to be found. template <class T> size_t Vector<T>::get_first_index(const T& value) const { const size_t this_size = this->size(); for(size_t i = 0; i < this_size; i++) { if((*this)[i] == value) { return(i); } } ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "size_t get_first_index(const T&) const.\n" << "Value not found in vector.\n"; throw logic_error(buffer.str()); } /// Returns the index of the smallest element in the vector. template <class T> size_t Vector<T>::calculate_minimal_index() const { Vector<T> copy(*this); typename vector<T>::iterator result = min_element(copy.begin(), copy.end()); return(distance(copy.begin(), result)); } /// Returns the index of the largest element in the vector. 
template <class T> size_t Vector<T>::calculate_maximal_index() const { Vector<T> copy(*this); typename vector<T>::iterator result = max_element(copy.begin(), copy.end()); return( distance(copy.begin(), result)); } ///// Returns the indices of the smallest elements in the vector. ///// @param number Number of minimal indices to be computed. template <class T> Vector<size_t> Vector<T>::calculate_minimal_indices(const size_t &number) const { const size_t this_size = this->size(); const Vector<size_t> rank = calculate_less_rank(); Vector<size_t> minimal_indices(number); // #pragma omp parallel for for(int i = 0; i < static_cast<int>(this_size); i++) { for(size_t j = 0; j < number; j++) { if(rank[static_cast<size_t>(i)] == j) { minimal_indices[j] = static_cast<size_t>(i); } } } return(minimal_indices); } ///// Returns the indices of the smallest elements in the vector. ///// Used for big vectors. ///// @param number Number of minimal indices to be computed. template <class T> Vector<size_t> Vector<T>::calculate_k_minimal_indices(const size_t &number) const{ const size_t this_size = this->size(); Vector<size_t> minimal_indices(number); Vector<double> minimal_values(number); #pragma omp parallel for for(int i = 0; i < this_size; i++) { double current_value = this->data()[i]; size_t max_value_index = minimal_values.calculate_maximal_index(); if(i<number) { minimal_indices[static_cast<size_t>(i)] = static_cast<size_t>(i); minimal_values[static_cast<size_t>(i)] = current_value; } else { for(int j=0; j<number; j++) { if(current_value < minimal_values[static_cast<size_t>(j)]) { minimal_indices[max_value_index] = static_cast<size_t>(i); minimal_values[max_value_index] = current_value; break; } } } } const Vector<size_t> sorted_indices = minimal_values.calculate_minimal_indices(number); Vector<size_t> sorted_minimal_indices(number); for(int i = 0; i <number; i++) { sorted_minimal_indices[static_cast<size_t>(i)] = minimal_indices[sorted_indices[static_cast<size_t>(i)]]; } 
return(sorted_minimal_indices); } /// Returns the indices of the largest elements in the vector. /// @param number Number of maximal indices to be computed. template <class T> Vector<size_t> Vector<T>::calculate_maximal_indices(const size_t &number) const { const size_t this_size = this->size(); const Vector<size_t> rank = calculate_greater_rank(); Vector<size_t> maximal_indices(number); for(size_t i = 0; i < this_size; i++) { for(size_t j = 0; j < number; j++) { if(rank[i] == j) { maximal_indices[j] = i; } } } return(maximal_indices); } /// Returns a vector with the indices of the smallest and the largest elements /// in the vector. template <class T> Vector<size_t> Vector<T>::calculate_minimal_maximal_index() const { const size_t this_size = this->size(); T minimum = (*this)[0]; T maximum = (*this)[0]; size_t minimal_index = 0; size_t maximal_index = 0; for(size_t i = 1; i < this_size; i++) { if((*this)[i] < minimum) { minimum = (*this)[i]; minimal_index = i; } if((*this)[i] > maximum) { maximum = (*this)[i]; maximal_index = i; } } Vector<size_t> minimal_maximal_index(2); minimal_maximal_index[0] = minimal_index; minimal_maximal_index[1] = maximal_index; return(minimal_maximal_index); } /// Returns a vector with the elements of this vector raised to a power /// exponent. /// @param exponent Pow exponent. template <class T> Vector<T> Vector<T>::calculate_pow(const T &exponent) const { const size_t this_size = this->size(); Vector<T> power(this_size); for(size_t i = 0; i < this_size; i++) { power[i] = pow((*this)[i], exponent); } return(power); } /// Returns the competitive vector of this vector, /// whose elements are one the bigest element of this vector, and zero for the /// other elements. 
template <class T> Vector<T> Vector<T>::calculate_competitive() const { const size_t this_size = this->size(); Vector<T> competitive(this_size, 0); const size_t maximal_index = calculate_maximal_index(); competitive[maximal_index] = 1; return(competitive); } /// Returns the softmax vector of this vector, /// whose elements sum one, and can be interpreted as probabilities. template <class T> Vector<T> Vector<T>::calculate_softmax() const { const size_t this_size = this->size(); Vector<T> softmax(this_size); T sum = 0; for(size_t i = 0; i < this_size; i++) { sum += exp((*this)[i]); } for(size_t i = 0; i < this_size; i++) { softmax[i] = exp((*this)[i]) / sum; } return(softmax); } /// Returns the softmax Jacobian of this vector. template <class T> Matrix<T> Vector<T>::calculate_softmax_Jacobian() const { const size_t this_size = this->size(); Matrix<T> softmax_Jacobian(this_size, this_size); for(size_t i = 0; i < this_size; i++) { for(size_t j = 0; j < this_size; j++) { if(i == j) { softmax_Jacobian(i, i) = (*this)[i] *(1.0 -(*this)[i]); } else { softmax_Jacobian(i, i) = (*this)[i] *(*this)[j]; } } } return(softmax_Jacobian); } /// This method converts the values of the vector to be binary. /// The threshold value used is 0.5. template <class T> Vector<bool> Vector<T>::calculate_binary() const { const size_t this_size = this->size(); Vector<bool> binary(this_size); for(size_t i = 0; i < this_size; i++) { if((*this)[i] < 0.5) { binary[i] = false; } else { binary[i] = true; } } return(binary); } /// This method calculates the square root of each element in the vector. template <class T> Vector<T> Vector<T>::calculate_square_root_elements() const { const size_t this_size = this->size(); Vector<T> square(this_size); for(size_t i = 0; i < this_size; i++) { square[i]=sqrt((*this)[i]); } return(square); } /// Return the cumulative vector of this vector, /// where each element is summed up with all the previous ones. 
template <class T> Vector<T> Vector<T>::calculate_cumulative() const { const size_t this_size = this->size(); Vector<T> cumulative(this_size); if(this_size > 0) { cumulative[0] = (*this)[0]; for(size_t i = 1; i < this_size; i++) { cumulative[i] = cumulative[i - 1] + (*this)[i]; } } return(cumulative); } /// This method applies only to cumulative vectors. /// It returns the index of the first element which is greater than a given /// value. /// @param value Value. template <class T> size_t Vector<T>::calculate_cumulative_index(const T &value) const { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ if(this_size == 0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "size_t calculate_cumulative_index(const T&) const.\n" << "Size must be greater than zero.\n"; throw logic_error(buffer.str()); } T cumulative_value = (*this)[this_size - 1]; if(value > cumulative_value) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "size_t calculate_cumulative_index(const T&) const.\n" << "Value(" << value << ") must be less than cumulative value(" << cumulative_value << ").\n"; throw logic_error(buffer.str()); } for(size_t i = 1; i < this_size; i++) { if((*this)[i] <(*this)[i - 1]) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "int calculate_cumulative_index(const T&) const.\n" << "Vector elements must be crescent.\n"; throw logic_error(buffer.str()); } } #endif if(value <= (*this)[0]) { return(0); } for(size_t i = 1; i < this_size; i++) { if(value >(*this)[i - 1] && value <= (*this)[i]) { return(i); } } return(this_size - 1); } /// Returns the index of the closest element in the vector to a given value. 
template <class T> size_t Vector<T>::calculate_closest_index(const T &value) const { const Vector<T> difference = (*this - value).calculate_absolute_value(); const size_t closest_index = difference.calculate_minimal_index(); return(closest_index); } /// Returns the sum of the elements in the vector. template <class T> T Vector<T>::calculate_sum() const { const size_t this_size = this->size(); T sum = 0; for(size_t i = 0; i < this_size; i++) { sum += (*this)[i]; } return(sum); } /// Returns the sum of the elements with the given indices. /// @param indices Indices of the elementes to sum. template <class T> T Vector<T>::calculate_partial_sum(const Vector<size_t> &indices) const { const size_t this_size = this->size(); T sum = 0; for(size_t i = 0; i < this_size; i++) { if(indices.contains(i)) { sum += (*this)[i]; } } return(sum); } /// Returns the sum of the elements in the vector. template <class T> T Vector<T>::calculate_sum_missing_values( const Vector<size_t> &missing_indices) const { const size_t this_size = this->size(); T sum = 0; for(size_t i = 0; i < this_size; i++) { if(!missing_indices.contains(i)) { sum += (*this)[i]; } } return(sum); } /// Returns the product of the elements in the vector. 
template <class T> T Vector<T>::calculate_product() const { const size_t this_size = this->size(); T product = 1; for(size_t i = 0; i < this_size; i++) { product *= (*this)[i]; } return(product); } template <class T> Vector<T> Vector<T>::calculate_moving_average_cyclic(const T& parameter) const { const size_t this_size = this->size(); Vector<T> moving_average(this_size); moving_average[0] = ((*this)[0]+ (parameter*((*this)[1]+ (*this)[this_size-1])))/(1+ (2*parameter)); moving_average[this_size-1] = ((*this)[this_size-1]+ (parameter*((*this)[this_size-2]+ (*this)[0])))/(1+ (2*parameter)); for(size_t i = 1; i < this->size()-1; i++) { moving_average[i] = ((*this)[i]+ (parameter*((*this)[i-1]+ (*this)[i+1])))/(1.0+2.0*parameter); } return(moving_average); } template <class T> Vector<T> Vector<T>::calculate_moving_average(const T& parameter) const { const size_t this_size = this->size(); Vector<T> moving_average(this_size); moving_average[0] = ((*this)[0]+ (parameter*(*this)[1]))/(1.0+parameter); moving_average[this_size-1] = ((*this)[this_size-1]+ (parameter*(*this)[this_size-2]))/(1.0+parameter); for(size_t i = 1; i < this_size-1; i++) { moving_average[i] = ((*this)[i]+ (parameter*((*this)[i-1]+ (*this)[i+1])))/(1.0+2.0*parameter); } return(moving_average); } template <class T> Vector<double> Vector<T>::calculate_simple_moving_average(const size_t& period) const { // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ if(period < 1) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_simple_moving_average(const size_t&) const method.\n" << "Period must be equal or greater than zero.\n"; throw logic_error(buffer.str()); } #endif const size_t this_size = this->size(); Vector<double> simple_moving_average(this_size, 0.0); #pragma omp parallel for for(int i = 0; i < this_size; i++) { const size_t begin = i < period ? 
0 : static_cast<size_t>(i) - period + 1; const size_t end = static_cast<size_t>(i); simple_moving_average[i] = calculate_mean(begin, end); } return simple_moving_average; } template <class T> Vector<double> Vector<T>::calculate_exponential_moving_average(const size_t& period) const { const size_t size = this->size(); Vector<double> exponential_moving_average(size); exponential_moving_average[0] = (*this)[0]; const double multiplier = 2.0 / double(period + 1.0); for(size_t i = 1; i < size; i++) { exponential_moving_average[i] = (*this)[i] * multiplier + exponential_moving_average[i-1] *(1.0 - multiplier); } return exponential_moving_average; } template <class T> double Vector<T>::calculate_last_exponential_moving_average(const size_t& period) const { const Vector<double> exponential_moving_average = calculate_exponential_moving_average(period); return exponential_moving_average.get_last(); } template <class T> Vector<double> Vector<T>::calculate_exponential_moving_average_with_initial_average(const size_t& period) const { const size_t size = this->size(); Vector<double> exponential_moving_average(size); double initial_average = 0.0; for(size_t i = 0; i < period; i++) { initial_average += (*this)[i]; } initial_average /= period; exponential_moving_average[0] = initial_average; const double multiplier = 2 / double(period + 1.0); for(size_t i = 1; i < size; i++) { exponential_moving_average[i] = (*this)[i] * multiplier + exponential_moving_average[i-1] *(1 - multiplier); } return(exponential_moving_average); } /// Returns the mean of the elements in the vector. 
template <class T> double Vector<T>::calculate_mean() const { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ if(this_size == 0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_mean() const method.\n" << "Size must be greater than zero.\n"; throw logic_error(buffer.str()); } #endif const T sum = calculate_sum(); const double mean = sum /static_cast<double>(this_size); return(mean); } /// Returns the mean of the subvector defined by a start and end elements. /// @param begin Start element. /// @param end End element. template <class T> double Vector<T>::calculate_mean(const size_t& begin, const size_t& end) const { // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ if(begin > end) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_mean(const size_t&, const size_t&) const method.\n" << "Begin must be less or equal than end.\n"; throw logic_error(buffer.str()); } #endif if(end == begin) return(*this)[begin]; double sum = 0.0; for(size_t i = begin; i <= end; i++) { sum += (*this)[i]; } return(sum /static_cast<double>(end-begin+1)); } /// Returns the linear slope of this vector. template <class T> double Vector<T>::calculate_linear_trend() const { const size_t this_size = this->size(); const Vector<double> independent_variable(0.0,1.0,static_cast<double>(this_size-1)); const LinearRegressionParameters<T> linear_regression_parameters = calculate_linear_regression_parameters(independent_variable); return(linear_regression_parameters.slope); } /// Returns the linear slope of a subvector defined by two elements. /// @param start First element. /// @param end Last element. 
template <class T> double Vector<T>::calculate_linear_trend(const size_t& start, const size_t& end) const { const Vector<size_t> indices(start, 1, end); const Vector<double> dependent_variable = get_subvector(indices); return(dependent_variable.calculate_linear_trend()); } template <class T> double Vector<T>::calculate_percentage_of_variation() const { const double percentage_of_variation = ((*this).get_last()-(*this).get_first())*100.0/(*this).get_first(); return percentage_of_variation; } // @todo template <class T> Vector<double> Vector<T>::calculate_percentage_of_variation(const size_t& period) const { const size_t this_size = this->size(); #ifdef __OPENNN_DEBUG__ if(this_size < period) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "Vector<double> calculate_percentage_of_variation(const size_t&) const method.\n" << "Size must be greater than period.\n"; throw logic_error(buffer.str()); } #endif Vector<double> percentage_of_variation(this_size, 0.0); if((*this) == 0.0) { return percentage_of_variation; } for(int i = 0; i < this_size; i++) { const size_t begin = i < period ? 0 : static_cast<size_t>(i) - period + 1; const size_t end = static_cast<size_t>(i); if((*this)[begin] != 0.0) { percentage_of_variation[i] = ((*this)[end] -(*this)[begin]) * 100.0 /(*this)[begin]; } else { percentage_of_variation[static_cast<size_t>(i)] = percentage_of_variation[static_cast<size_t>(i-1)]; } } return percentage_of_variation; } template <class T> double Vector<T>::calculate_last_percentage_of_variation(const size_t& period) const { const Vector<double> percentage_of_variation = calculate_percentage_of_variation(period); return percentage_of_variation.get_last(); } /// Returns the mode of the vector, i.e., the element with most occurrences. 
template <class T> T Vector<T>::calculate_mode() const { // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ const size_t this_size = this->size(); if(this_size == 0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_mode() const method.\n" << "Size must be greater than zero.\n"; throw logic_error(buffer.str()); } #endif const Vector<T> unique = get_unique_elements(); const size_t maximal_index = count_unique().calculate_maximal_index(); return(unique[maximal_index]); } /// Returns the mode of the vector, when it has missing values. /// @param missing_indices Indices of the missing values in the vector. template <class T> T Vector<T>::calculate_mode_missing_values(const Vector<size_t>& missing_indices) const { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ if(this_size == 0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_mode_missing_values(const Vector<size_t>&) const method.\n" << "Size must be greater than zero.\n"; throw logic_error(buffer.str()); } #endif const size_t missing_indices_size = missing_indices.size(); Vector<T> new_vector(this_size - missing_indices_size); size_t count = 0; for(size_t i = 0; i < this_size; i++) { if(!missing_indices.contains(i)) { new_vector[count] = (*this)[i]; count++; } } return(new_vector.calculate_mode()); } /// Returns the variance of the elements in the vector. 
template <class T> double Vector<T>::calculate_variance() const { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ if(this_size == 0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_variance() const method.\n" << "Size must be greater than zero.\n"; throw logic_error(buffer.str()); } #endif if(this_size == 1) { return(0.0); } double sum = 0.0; double squared_sum = 0.0; for(size_t i = 0; i < this_size; i++) { sum += (*this)[i]; squared_sum += (*this)[i] *(*this)[i]; } const double numerator = squared_sum -(sum * sum) /static_cast<double>(this_size); const double denominator = this_size - 1.0; if(denominator == 0.0) { return 0.0; } else { return(numerator / denominator); } } /// Returns the covariance of this vector and other vector template<class T> double Vector<T>::calculate_covariance(const Vector<double>& other_vector) const { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ if(this_size == 0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_covariance(const Vector<double>&) const method.\n" << "Size must be greater than zero.\n"; throw logic_error(buffer.str()); } #endif // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ if(this_size != other_vector.size()) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_covariance(const Vector<double>&) const method.\n" << "Size of this vectro must be equal to size of other vector.\n"; throw logic_error(buffer.str()); } #endif if(this_size == 1) { return 0.0; } const double this_mean = this->calculate_mean(); const double other_mean = other_vector.calculate_mean(); double numerator = 0.0; double denominator = static_cast<double>(this_size-1); for(size_t i = 0; i < this_size; i++) { numerator += ((*this)[i]-this_mean)*(other_vector[i]-other_mean); } return(numerator/denominator); } /// Returns the 
standard deviation of the elements in the vector. template <class T> double Vector<T>::calculate_standard_deviation() const { // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ const size_t this_size = this->size(); if(this_size == 0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_standard_deviation() const method.\n" << "Size must be greater than zero.\n"; throw logic_error(buffer.str()); } #endif return(sqrt(calculate_variance())); } template <class T> Vector<double> Vector<T>::calculate_standard_deviation(const size_t& period) const { const size_t this_size = this->size(); Vector<double> standard_deviation(this_size, 0.0); double mean = 0.0; double sum = 0.0; for(size_t i = 0; i < this_size; i++) { const size_t begin = i < period ? 0 : i - period + 1; const size_t end = i; mean = calculate_mean(begin,end); for(size_t j = begin; j < end+1; j++) { sum += ((*this)[j] - mean) *((*this)[j] - mean); } standard_deviation[i] = sqrt(sum / double(period)); mean = 0.0; sum = 0.0; } standard_deviation[0] = standard_deviation[1]; return(standard_deviation); } /// Returns the asymmetry of the elements in the vector template <class T> double Vector<T>::calculate_asymmetry() const { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ if(this_size == 0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_asymmetry() const method.\n" << "Size must be greater than zero.\n"; throw logic_error(buffer.str()); } #endif if(this_size == 1) { return 0.0; } const double standard_deviation = calculate_standard_deviation(); const double mean = calculate_mean(); double sum = 0.0; for(size_t i = 0; i < this_size; i++) { sum += ((*this)[i] - mean)*((*this)[i] - mean)*((*this)[i] - mean); } const double numerator = sum /static_cast<double>(this_size); const double denominator = standard_deviation * standard_deviation * standard_deviation; return(numerator / 
denominator); } /// Returns the kurtosis value of the elements in the vector. template <class T> double Vector<T>::calculate_kurtosis() const { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ if(this_size == 0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_kurtosis() const method.\n" << "Size must be greater than zero.\n"; throw logic_error(buffer.str()); } #endif if(this_size == 1) { return 0.0; } const double standard_deviation = calculate_standard_deviation(); const double mean = calculate_mean(); double sum = 0.0; for(size_t i = 0; i < this_size; i++) { sum += ((*this)[i] - mean)*((*this)[i] - mean)*((*this)[i] - mean)*((*this)[i] - mean); } const double numerator = sum/static_cast<double>(this_size); const double denominator = standard_deviation*standard_deviation*standard_deviation*standard_deviation; return((numerator/denominator)-3.0); } /// Returns the mean and the standard deviation of the elements in the vector. 
template <class T> Vector<double> Vector<T>::calculate_mean_standard_deviation() const { // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ const size_t this_size = this->size(); if(this_size == 0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_mean_standard_deviation().\n" << "Size must be greater than zero.\n"; throw logic_error(buffer.str()); } #endif const double mean = calculate_mean(); const double standard_deviation = calculate_standard_deviation(); return {mean, standard_deviation}; } /// Returns the median of the elements in the vector template <class T> double Vector<T>::calculate_median() const { const size_t this_size = this->size(); Vector<T> sorted_vector(*this); sort(sorted_vector.begin(), sorted_vector.end(), less<double>()); size_t median_index; if(this_size % 2 == 0) { median_index = static_cast<size_t>(this_size / 2); return((sorted_vector[median_index-1] + sorted_vector[median_index]) / 2.0); } else { median_index = static_cast<size_t>(this_size / 2); return(sorted_vector[median_index]); } } /// Returns the quarters of the elements in the vector. 
template <class T> Vector<double> Vector<T>::calculate_quartiles() const { const size_t this_size = this->size(); Vector<T> sorted_vector(*this); sort(sorted_vector.begin(), sorted_vector.end(), less<double>()); Vector<double> quartiles(3); if(this_size == 1) { quartiles[0] = sorted_vector[0]; quartiles[1] = sorted_vector[0]; quartiles[2] = sorted_vector[0]; } else if(this_size == 2) { quartiles[0] = (sorted_vector[0]+sorted_vector[1])/4; quartiles[1] = (sorted_vector[0]+sorted_vector[1])/2; quartiles[2] = (sorted_vector[0]+sorted_vector[1])*3/4; } else if(this_size == 3) { quartiles[0] = (sorted_vector[0]+sorted_vector[1])/2; quartiles[1] = sorted_vector[1]; quartiles[2] = (sorted_vector[2]+sorted_vector[1])/2; } else if(this_size % 2 == 0) { quartiles[0] = sorted_vector.get_first(this_size/2).calculate_median(); quartiles[1] = sorted_vector.calculate_median(); quartiles[2] = sorted_vector.get_last(this_size/2).calculate_median(); } else { quartiles[0] = sorted_vector[int(this_size/4)]; quartiles[1] = sorted_vector[int(this_size/2)]; quartiles[2] = sorted_vector[int(this_size*3/4)]; } return(quartiles); } template <class T> Vector<double> Vector<T>::calculate_percentiles() const { const size_t this_size = this->size(); const Vector<size_t> sorted_vector = this->sort_ascending(); Vector<double> percentiles(10); if(this_size % 2 == 0) { percentiles[0] = (sorted_vector[this_size / 10] + sorted_vector[this_size / 10 + 1]) / 2; percentiles[1] = (sorted_vector[this_size * 2 / 10] + sorted_vector[this_size * 2 / 10 + 1]) / 2; percentiles[2] = (sorted_vector[this_size * 3 / 10] + sorted_vector[this_size * 3 / 10 + 1]) / 2; percentiles[3] = (sorted_vector[this_size * 4 / 10] + sorted_vector[this_size * 4 / 10 + 1]) / 2; percentiles[4] = (sorted_vector[this_size * 5 / 10] + sorted_vector[this_size * 5 / 10 + 1]) / 2; percentiles[5] = (sorted_vector[this_size * 6 / 10] + sorted_vector[this_size * 6 / 10 + 1]) / 2; percentiles[6] = (sorted_vector[this_size * 7 / 10] + 
sorted_vector[this_size * 7 / 10 + 1]) / 2; percentiles[7] = (sorted_vector[this_size * 8 / 10] + sorted_vector[this_size * 8 / 10 + 1]) / 2; percentiles[8] = (sorted_vector[this_size * 9 / 10] + sorted_vector[this_size * 9 / 10 + 1]) / 2; percentiles[9] = calculate_maximum(); } else { percentiles[0] = sorted_vector[this_size / 10]; percentiles[1] = sorted_vector[this_size * 2 / 10]; percentiles[2] = sorted_vector[this_size * 3 / 10]; percentiles[3] = sorted_vector[this_size * 4 / 10]; percentiles[4] = sorted_vector[this_size * 5 / 10]; percentiles[5] = sorted_vector[this_size * 6 / 10]; percentiles[6] = sorted_vector[this_size * 7 / 10]; percentiles[7] = sorted_vector[this_size * 8 / 10]; percentiles[8] = sorted_vector[this_size * 9 / 10]; percentiles[9] = calculate_maximum(); } return(percentiles); } /// Returns the quarters of the elements in the vector when there are missing values. /// @param missing_indices Vector with the indices of the missing values. template <class T> Vector<double> Vector<T>::calculate_quartiles_missing_values(const Vector<size_t> & missing_indices) const { const size_t this_size = this->size(); const size_t missing_indices_number = missing_indices.size(); const Vector<T> values_to_remove = this->get_subvector(missing_indices); Vector<T> sorted_vector(*this); sorted_vector = sorted_vector.difference(values_to_remove); sort(sorted_vector.begin(), sorted_vector.end(), less<double>()); const size_t actual_size = this_size - missing_indices_number; Vector<double> quartiles(3); if(actual_size % 2 == 0) { quartiles[0] = (sorted_vector[actual_size / 4] + sorted_vector[actual_size / 4 + 1]) / 2.0; quartiles[1] = (sorted_vector[actual_size * 2 / 4] + sorted_vector[actual_size * 2 / 4 + 1]) / 2.0; quartiles[2] = (sorted_vector[actual_size * 3 / 4] + sorted_vector[actual_size * 3 / 4 + 1]) / 2.0; } else { quartiles[0] = sorted_vector[actual_size / 4]; quartiles[1] = sorted_vector[actual_size * 2 / 4]; quartiles[2] = sorted_vector[actual_size * 3 / 
4]; } return(quartiles); } /// Returns the mean of the elements in the vector. template <class T> double Vector<T>::calculate_mean_missing_values(const Vector<size_t> &missing_indices) const { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ if(this_size == 0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_mean_missing_values(const Vector<size_t>&) " "const method.\n" << "Size must be greater than zero.\n"; throw logic_error(buffer.str()); } #endif T sum = 0; size_t count = 0; for(size_t i = 0; i < this_size; i++) { if(!missing_indices.contains(i)) { sum += (*this)[i]; count++; } } const double mean = sum /static_cast<double>(count); return(mean); } /// Returns the variance of the elements in the vector. template <class T> double Vector<T>::calculate_variance_missing_values( const Vector<size_t> &missing_indices) const { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ if(this_size == 0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_variance_missing_values(const Vector<size_t>&) " "const method.\n" << "Size must be greater than zero.\n"; throw logic_error(buffer.str()); } #endif double sum = 0.0; double squared_sum = 0.0; size_t count = 0; for(size_t i = 0; i < this_size; i++) { if(!missing_indices.contains(i)) { sum += (*this)[i]; squared_sum += (*this)[i] *(*this)[i]; count++; } } if(count <= 1) { return(0.0); } const double numerator = squared_sum -(sum * sum) /static_cast<double>(count); const double denominator = this_size - 1.0; return(numerator / denominator); } /// Returns the weighted mean of the vector. /// @param weights Weights of the elements of the vector in the mean. 
template <class T> double Vector<T>::calculate_weighted_mean(const Vector<double> & weights) const { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ if(this_size == 0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_weighted_mean(const Vector<double>&) const method.\n" << "Size must be greater than zero.\n"; throw logic_error(buffer.str()); } const size_t weights_size = weights.size(); if(this_size != weights_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_weighted_mean(const Vector<double>&) " "const method.\n" << "Size of weights must be equal to vector size.\n"; throw logic_error(buffer.str()); } #endif double weights_sum = 0; T sum = 0; for(size_t i = 0; i < this_size; i++) { sum += weights[i]*(*this)[i]; weights_sum += weights[i]; } const double mean = sum / weights_sum; return(mean); } /// Returns the standard deviation of the elements in the vector. template <class T> double Vector<T>::calculate_standard_deviation_missing_values( const Vector<size_t> &missing_indices) const { // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ const size_t this_size = this->size(); if(this_size == 0) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_standard_deviation_missing_values(const " "Vector<size_t>&) const method.\n" << "Size must be greater than zero.\n"; throw logic_error(buffer.str()); } #endif return(sqrt(calculate_variance_missing_values(missing_indices))); } /// Returns the asymmetry of the elements in the vector. 
/// Returns the asymmetry(skewness) of the elements in the vector, excluding
/// the given missing values.
/// @param missing_indices Indices of the elements that must be ignored.

template <class T>
double Vector<T>::calculate_asymmetry_missing_values(
    const Vector<size_t> &missing_indices) const
{
  const size_t this_size = this->size();

  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  if(this_size == 0) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "double calculate_asymmetry_missing_values(const "
              "Vector<size_t>&) const method.\n"
           << "Size must be greater than zero.\n";

    throw logic_error(buffer.str());
  }

#endif

  if(this_size == 1) {
    return 0.0;
  }

  // Bug fix: the mean and standard deviation must also exclude the missing
  // values; the plain calculate_mean()/calculate_standard_deviation() mixed
  // the missing elements back into the statistic.

  const double standard_deviation =
      calculate_standard_deviation_missing_values(missing_indices);

  const double mean = calculate_mean_missing_values(missing_indices);

  double sum = 0.0;
  size_t count = 0;

  for(size_t i = 0; i < this_size; i++) {
    if(!missing_indices.contains(i)) {
      sum += ((*this)[i] - mean) *((*this)[i] - mean) *((*this)[i] - mean);
      count++;
    }
  }

  // Bug fix: normalize by the number of non-missing elements, not this_size.

  const double numerator = sum /static_cast<double>(count);
  const double denominator =
      standard_deviation * standard_deviation * standard_deviation;

  return(numerator / denominator);
}

/// Returns the kurtosis of the elements in the vector.
/// Returns the excess kurtosis of the elements in the vector, excluding the
/// given missing values.
/// @param missing_indices Indices of the elements that must be ignored.

template <class T>
double Vector<T>::calculate_kurtosis_missing_values(
    const Vector<size_t> &missing_indices) const
{
  const size_t this_size = this->size();

  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  if(this_size == 0) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "double calculate_kurtosis_missing_values(const Vector<size_t>&) "
              "const method.\n"
           << "Size must be greater than zero.\n";

    throw logic_error(buffer.str());
  }

#endif

  if(this_size == 1) {
    return 0.0;
  }

  // Bug fix: the mean and standard deviation must also exclude the missing
  // values, mirroring calculate_asymmetry_missing_values.

  const double standard_deviation =
      calculate_standard_deviation_missing_values(missing_indices);

  const double mean = calculate_mean_missing_values(missing_indices);

  double sum = 0.0;
  size_t count = 0;

  for(size_t i = 0; i < this_size; i++) {
    if(!missing_indices.contains(i)) {
      sum += ((*this)[i] - mean)*((*this)[i] - mean)*((*this)[i] - mean)*((*this)[i] - mean);
      count++;
    }
  }

  // Bug fix: normalize by the number of non-missing elements, not this_size.
  // The trailing -3.0 converts kurtosis to excess kurtosis(normal == 0).

  const double numerator = sum /static_cast<double>(count);
  const double denominator =
      standard_deviation*standard_deviation*standard_deviation*standard_deviation;

  return((numerator/denominator)-3.0);
}

/// Returns the minimum, maximum, mean and standard deviation of the elements in
/// the vector.
/// Returns the minimum, maximum, mean and sample standard deviation of the
/// elements in the vector, computed in a single pass.

template <class T>
Statistics<T> Vector<T>::calculate_statistics() const
{
  // Control sentence(if debug)

  const size_t this_size = this->size();

#ifdef __OPENNN_DEBUG__

  if(this_size == 0) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "double calculate_statistics().\n"
           << "Size must be greater than zero.\n";

    throw logic_error(buffer.str());
  }

#endif

  Statistics<T> statistics;

  T minimum = numeric_limits<T>::max();
  T maximum;

  double sum = 0;
  double squared_sum = 0;
  size_t count = 0;

  // numeric_limits<T>::max() is the largest finite value, so its negation is
  // a safe lower sentinel for signed types; unsigned types start at 0.

  if(numeric_limits<T>::is_signed) {
    maximum = -1*numeric_limits<T>::max();
  } else {
    maximum = 0;
  }

  for(size_t i = 0; i < this_size; i++) {
    if((*this)[i] < minimum) {
      minimum = (*this)[i];
    }

    if((*this)[i] > maximum) {
      maximum = (*this)[i];
    }

    sum += (*this)[i];
    squared_sum += (*this)[i] *(*this)[i];

    count++;
  }

  const double mean = sum/static_cast<double>(count);

  double standard_deviation;

  if(count <= 1) {
    standard_deviation = 0.0;
  } else {
    const double numerator = squared_sum -(sum * sum) / count;
    const double denominator = this_size - 1.0;

    standard_deviation = numerator / denominator;
  }

  // Bug fix: floating-point cancellation in(squared_sum - sum*sum/count) can
  // produce a tiny negative variance, which would make sqrt() return NaN.

  if(standard_deviation < 0.0) {
    standard_deviation = 0.0;
  }

  standard_deviation = sqrt(standard_deviation);

  statistics.minimum = minimum;
  statistics.maximum = maximum;
  statistics.mean = mean;
  statistics.standard_deviation = standard_deviation;

  return(statistics);
}

/// Returns the minimum, maximum, mean and standard deviation of the elements in the vector.
/// Returns the minimum, maximum, mean and sample standard deviation of the
/// elements in the vector, excluding the given missing values.
/// @param missing_indices Indices of the elements that must be ignored.

template <class T>
Statistics<T> Vector<T>::calculate_statistics_missing_values(
    const Vector<size_t> &missing_indices) const
{
  // Control sentence(if debug)

  const size_t this_size = this->size();

#ifdef __OPENNN_DEBUG__

  if(this_size == 0) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "double calculate_statistics_missing_values(const "
              "Vector<size_t>&).\n"
           << "Size must be greater than zero.\n";

    throw logic_error(buffer.str());
  }

#endif

  Statistics<T> statistics;

  T minimum = numeric_limits<T>::max();
  T maximum;

  double sum = 0;
  double squared_sum = 0;
  size_t count = 0;

  if(numeric_limits<T>::is_signed) {
    maximum = -numeric_limits<T>::max();
  } else {
    maximum = 0;
  }

  for(size_t i = 0; i < this_size; i++) {
    if(!missing_indices.contains(i)) {
      if((*this)[i] < minimum) {
        minimum = (*this)[i];
      }

      if((*this)[i] > maximum) {
        maximum = (*this)[i];
      }

      sum += (*this)[i];
      squared_sum += (*this)[i] *(*this)[i];

      count++;
    }
  }

  const double mean = sum/static_cast<double>(count);

  double standard_deviation;

  if(count <= 1) {
    standard_deviation = 0.0;
  } else {
    const double numerator = squared_sum -(sum * sum) / count;

    // Bug fix: normalize by the number of non-missing elements(count - 1),
    // not by the total vector size.

    const double denominator = count - 1.0;

    standard_deviation = numerator / denominator;
  }

  // Guard against a tiny negative variance caused by floating-point
  // cancellation, which would make sqrt() return NaN.

  if(standard_deviation < 0.0) {
    standard_deviation = 0.0;
  }

  standard_deviation = sqrt(standard_deviation);

  statistics.minimum = minimum;
  statistics.maximum = maximum;
  statistics.mean = mean;
  statistics.standard_deviation = standard_deviation;

  return(statistics);
}

/// Returns a vector with the asymmetry and the kurtosis values of the elements
/// in the vector.
/// Returns a two-element vector holding the asymmetry and the kurtosis of the
/// elements in the vector, in that order.

template <class T>
Vector<double> Vector<T>::calculate_shape_parameters() const
{
  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  const size_t this_size = this->size();

  if(this_size == 0) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "Vector<double> calculate_shape_parameters().\n"
           << "Size must be greater than zero.\n";

    throw logic_error(buffer.str());
  }

#endif

  Vector<double> shape_parameters(2);

  shape_parameters[0] = calculate_asymmetry();
  shape_parameters[1] = calculate_kurtosis();

  return(shape_parameters);
}

/// Returns a two-element vector holding the asymmetry and the kurtosis of the
/// elements in the vector, excluding the given missing values.
/// @param missing_values Indices of the elements that must be ignored.

template <class T>
Vector<double> Vector<T>::calculate_shape_parameters_missing_values(
    const Vector<size_t> &missing_values) const
{
  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  const size_t this_size = this->size();

  if(this_size == 0) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "double calculate_shape_parameters_missing_values(const "
              "Vector<size_t>&).\n"
           << "Size must be greater than zero.\n";

    throw logic_error(buffer.str());
  }

#endif

  Vector<double> shape_parameters(2);

  shape_parameters[0] = calculate_asymmetry_missing_values(missing_values);
  shape_parameters[1] = calculate_kurtosis_missing_values(missing_values);

  return(shape_parameters);
}

/// Returns the five box-and-whiskers numbers for the vector:
/// minimum, first quartile, median, third quartile and maximum.

template <class T>
Vector<double> Vector<T>::calculate_box_plot() const
{
  Vector<double> box_plots(5, 0.0);

  // An empty vector yields an all-zero box plot.

  if(this->empty()) return box_plots;

  const Vector<double> quartiles = calculate_quartiles();

  box_plots[0] = calculate_minimum();
  box_plots[1] = quartiles[0];
  box_plots[2] = quartiles[1];
  box_plots[3] = quartiles[2];
  box_plots[4] = calculate_maximum();

  return(box_plots);
}

/// Returns the box and whispers for a vector when there are missing values.
/// @param missing_indices Vector with the indices of the missing values.
template <class T>
Vector<double> Vector<T>::calculate_box_plot_missing_values(const Vector<size_t> & missing_indices) const
{
  Vector<double> box_plots(5);

  const Vector<double> quartiles = calculate_quartiles_missing_values(missing_indices);

  // Five-number summary: min, Q1, median, Q3, max(missing values excluded).

  box_plots[0] = calculate_minimum_missing_values(missing_indices);
  box_plots[1] = quartiles[0];
  box_plots[2] = quartiles[1];
  box_plots[3] = quartiles[2];
  box_plots[4] = calculate_maximum_missing_values(missing_indices);

  return(box_plots);
}

/// Draws a random index with probability proportional to the element values
/// (roulette-wheel selection over the cumulative sums).

template <class T>
size_t Vector<T>::calculate_sample_index_proportional_probability() const
{
  const size_t this_size = this->size();

  const Vector<double> cumulative = this->calculate_cumulative();

  const double sum = calculate_sum();

  const double random = calculate_random_uniform(0., sum);

  size_t selected_index = 0;

  for(size_t i = 0; i < this_size; i++) {
    if(i == 0 && random < cumulative[0]) {
      selected_index = i;
      break;
    } else if(random < cumulative[i] && random >= cumulative[i-1]) {
      selected_index = i;
      break;
    }
  }

  return selected_index;
}

/// Returns the L1(taxicab) norm of the vector: the sum of absolute values.

template <class T>
double Vector<T>::calculate_L1_norm() const
{
  // Control sentence(if debug)

  return calculate_absolute_value().calculate_sum();
}

/// Returns the element-wise sign of the vector(-1 or +1).
/// Throws if any element is exactly zero, where the sign is not derivable.

template <class T>
Vector<T> Vector<T>::calculate_sign() const
{
  const size_t this_size = this->size();

  // Control sentence(if debug)

  Vector<T> sign_vector(this_size);

  for (size_t i = 0; i < this_size; i++) {
    if ((*this)[i] < 0) {
      sign_vector[i] = -1.0;
    } else if((*this)[i] > 0) {
      sign_vector[i] = 1.0;
    } else {
      throw logic_error("Error: Parameter " + to_string(i) + " is equal to zero: Non-derivative function");
    }
  }

  return(sign_vector);
}

/// Returns the gradient of the L1 norm, i.e. the sign of each element.

template <class T>
Vector<T> Vector<T>::calculate_L1_norm_gradient() const
{
  // Control sentence(if debug)

  return(calculate_sign());
}

/// Returns the Hessian of the vector norm.
template <class T> Matrix<T> Vector<T>::calculate_L1_norm_Hessian() const { const size_t this_size = this->size(); // Control sentence(if debug) Matrix<T> Hessian(this_size, this_size, 0); return(Hessian); } /// Returns the vector norm. template <class T> double Vector<T>::calculate_L2_norm() const { const size_t this_size = this->size(); // Control sentence(if debug) double norm = 0.0; for(size_t i = 0; i < this_size; i++) { norm += (*this)[i] *(*this)[i]; } return sqrt(norm); } /// Returns the gradient of the vector norm. template <class T> Vector<T> Vector<T>::calculate_L2_norm_gradient() const { const size_t this_size = this->size(); // Control sentence(if debug) Vector<T> gradient(this_size); const double norm = calculate_L2_norm(); if(norm == 0.0) { gradient.initialize(0.0); } else { gradient = (*this) / norm; } return(gradient); } /// Returns the Hessian of the vector norm. template <class T> Matrix<T> Vector<T>::calculate_L2_norm_Hessian() const { const size_t this_size = this->size(); // Control sentence(if debug) Matrix<T> Hessian(this_size, this_size); const double norm = calculate_L2_norm(); if(norm == 0.0) { Hessian.initialize(0.0); } else { Hessian = (*this).direct(*this)/(norm * norm * norm); } return(Hessian); } /// Returns the vector p-norm. template <class T> double Vector<T>::calculate_Lp_norm(const double &p) const { // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ ostringstream buffer; if(p <= 0) { buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_p_norm(const double&) const method.\n" << "p value must be greater than zero.\n"; throw logic_error(buffer.str()); } #endif const size_t this_size = this->size(); // Control sentence(if debug) double norm = 0.0; for(size_t i = 0; i < this_size; i++) { norm += pow(fabs((*this)[i]), p); } norm = pow(norm, 1.0 / p); return(norm); } /// Returns the gradient of the vector norm. 
/// Returns the gradient of the Lp norm for the given exponent, or the zero
/// vector when the norm vanishes.
/// @param p Norm exponent; must be greater than zero.

template <class T>
Vector<double> Vector<T>::calculate_Lp_norm_gradient(const double &p) const
{
  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  ostringstream buffer;

  if(p <= 0) {
    buffer << "OpenNN Exception: Vector Template.\n"
           << "Vector<double> calculate_p_norm_gradient(const double&) const "
              "method.\n"
           << "p value must be greater than zero.\n";

    throw logic_error(buffer.str());
  }

#endif

  const size_t this_size = this->size();

  Vector<double> gradient(this_size);

  const double p_norm = calculate_Lp_norm(p);

  if(p_norm == 0.0) {
    gradient.initialize(0.0);
  } else {
    // d/dx_i ||x||_p = x_i * |x_i|^(p-2) / ||x||_p^(p-1)

    for(size_t i = 0; i < this_size; i++) {
      gradient[i] =
          (*this)[i] * pow(fabs((*this)[i]), p - 2.0) / pow(p_norm, p - 1.0);
    }
  }

  return(gradient);
}

/// Returns a unit-length copy of this vector(divided by its L2 norm), or the
/// zero vector when the norm vanishes.

template <class T>
Vector<T> Vector<T>::calculate_normalized() const
{
  const size_t this_size = this->size();

  Vector<T> normalized(this_size);

  const double norm = calculate_L2_norm();

  if(norm == 0.0) {
    normalized.initialize(0.0);
  } else {
    normalized = (*this) / norm;
  }

  return(normalized);
}

/// Returns the distance between the elements of this vector and the elements of
/// another vector.
/// @param other_vector Other vector.
template <class T> double Vector<T>::calculate_euclidean_distance(const Vector<T> &other_vector) const { const size_t this_size = this->size(); #ifdef __OPENNN_DEBUG__ const size_t other_size = other_vector.size(); if(other_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_euclidean_distance(const Vector<T>&) const " "method.\n" << "Size must be equal to this size.\n"; throw logic_error(buffer.str()); } #endif double distance = 0.0; double error; for(size_t i = 0; i < this_size; i++) { error = (*this)[i] - other_vector[i]; distance += error * error; } return(sqrt(distance)); } template <class T> double Vector<T>::calculate_euclidean_weighted_distance(const Vector<T>& other_vector, const Vector<double>& weights) const { const size_t this_size = this->size(); #ifdef __OPENNN_DEBUG__ const size_t other_size = other_vector.size(); if(other_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_euclidean_weighted_distance(const Vector<T>&) const " "method.\n" << "Size must be equal to this size.\n"; throw logic_error(buffer.str()); } #endif double distance = 0.0; double error; for(size_t i = 0; i < this_size; i++) { error = (*this)[i] - other_vector[i]; distance += error * error * weights[i]; } return(sqrt(distance)); } template <class T> double Vector<T>::calculate_manhattan_distance(const Vector<T> &other_vector) const { const size_t this_size = this->size(); #ifdef __OPENNN_DEBUG__ const size_t other_size = other_vector.size(); if(other_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "double calculate_manhattan_distance(const Vector<T>&) const " "method.\n" << "Size must be equal to this size.\n"; throw logic_error(buffer.str()); } #endif double distance = 0.0; double error; for(size_t i = 0; i < this_size; i++) { error = abs((*this)[i] - other_vector[i]); distance += error; } return(distance); } 
/// Returns the weighted Manhattan distance to another vector, where each
/// absolute difference is scaled by the corresponding weight.
/// @param other_vector Other vector.
/// @param weights Per-element weights.

template <class T>
double Vector<T>::calculate_manhattan_weighted_distance(const Vector<T>& other_vector,
                                                        const Vector<double>& weights) const
{
  const size_t this_size = this->size();

#ifdef __OPENNN_DEBUG__

  const size_t other_size = other_vector.size();

  if(other_size != this_size) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "double calculate_manhattan_weighted_distance(const Vector<T>&) const "
              "method.\n"
           << "Size must be equal to this size.\n";

    throw logic_error(buffer.str());
  }

#endif

  double distance = 0.0;
  double error;

  for(size_t i = 0; i < this_size; i++) {
    error = abs((*this)[i] - other_vector[i]);

    distance += error * weights[i];
  }

  return(distance);
}

/// Returns the sum squared error between the elements of this vector and the
/// elements of another vector.
/// @param other_vector Other vector.

template <class T>
double Vector<T>::calculate_sum_squared_error(const Vector<double> &other_vector) const
{
  const size_t this_size = this->size();

  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  const size_t other_size = other_vector.size();

  if(other_size != this_size) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "double calculate_sum_squared_error(const Vector<double>&) const "
              "method.\n"
           << "Size must be equal to this size.\n";

    throw logic_error(buffer.str());
  }

#endif

  // Accumulate squared element-wise differences; no square root here.

  double sum_squared_error = 0.0;
  double error;

  for(size_t i = 0; i < this_size; i++) {
    error = (*this)[i] - other_vector[i];

    sum_squared_error += error * error;
  }

  return(sum_squared_error);
}

/// Returns the sum squared error between the elements of this vector and the
/// elements of a row of a matrix.
/// @param matrix Matrix to compute the error.
/// @param row_index Index of the row of the matrix.
/// @param column_indices Indices of the columns of the matrix to evaluate.
template <class T>
double Vector<T>::calculate_sum_squared_error(
    const Matrix<T> &matrix, const size_t &row_index,
    const Vector<size_t> &column_indices) const
{
  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  const size_t this_size = this->size();
  const size_t other_size = column_indices.size();

  if(other_size != this_size) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "double calculate_sum_squared_error(const Matrix<T>&, const size_t&, const Vector<size_t>&) const method.\n"
           << "Size must be equal to this size.\n";

    throw logic_error(buffer.str());
  }

#endif

  double sum_squared_error = 0.0;
  double error;

  const size_t size = column_indices.size();

  // Element i of this vector is compared against matrix(row_index,
  // column_indices[i]).

  for(size_t i = 0; i < size; i++) {
    error = (*this)[i] - matrix(row_index, column_indices[i]);

    sum_squared_error += error * error;
  }

  return(sum_squared_error);
}

/// Returns the Minkowski squared error between the elements of this vector and
/// the elements of another vector.
/// @param other_vector Other vector.
/// @param Minkowski_parameter Minkowski exponent.
template <class T>
double Vector<T>::calculate_Minkowski_error(const Vector<double> &other_vector,
                                            const double &Minkowski_parameter) const
{
  const size_t this_size = this->size();

  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  ostringstream buffer;

  if(this_size == 0) {
    buffer << "OpenNN Exception: Vector Template.\n"
           << "double calculate_Minkowski_error(const Vector<double>&) const "
              "method.\n"
           << "Size must be greater than zero.\n";

    throw logic_error(buffer.str());
  }

  const size_t other_size = other_vector.size();

  if(other_size != this_size) {
    buffer << "OpenNN Exception: Vector Template.\n"
           << "double calculate_Minkowski_error(const Vector<double>&) const "
              "method.\n"
           << "Other size must be equal to this size.\n";

    throw logic_error(buffer.str());
  }

  // Control sentence

  if(Minkowski_parameter < 1.0 || Minkowski_parameter > 2.0) {
    buffer << "OpenNN Exception: Vector Template.\n"
           << "double calculate_Minkowski_error(const Vector<double>&) const "
              "method.\n"
           << "The Minkowski parameter must be comprised between 1 and 2\n";

    throw logic_error(buffer.str());
  }

#endif

  // Sum |a_i - b_i|^p, then take the p-th root.

  double Minkowski_error = 0.0;

  for(size_t i = 0; i < this_size; i++) {
    Minkowski_error +=
        pow(fabs((*this)[i] - other_vector[i]), Minkowski_parameter);
  }

  Minkowski_error = pow(Minkowski_error, 1.0 / Minkowski_parameter);

  return(Minkowski_error);
}

/// Calculates the linear regression parameters(intercept, slope and
/// correlation) between another vector and this vector.
/// It returns a linear regression parameters structure.
/// @param other Other vector for the linear regression analysis.
template <class T> LinearRegressionParameters<T> Vector<T>::calculate_linear_regression_parameters( const Vector<T> &x) const { const size_t n = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ const size_t x_size = x.size(); ostringstream buffer; if(x_size != n) { buffer << "OpenNN Exception: Vector Template.\n" << "LinearRegressionParameters<T> " "calculate_linear_regression_parameters(const Vector<T>&) const " "method.\n" << "Other size must be equal to this size.\n"; throw logic_error(buffer.str()); } #endif T s_x = 0; T s_y = 0; T s_xx = 0; T s_yy = 0; T s_xy = 0; for(size_t i = 0; i < n; i++) { s_x += x[i]; s_y += (*this)[i]; s_xx += x[i] * x[i]; s_yy += (*this)[i] *(*this)[i]; s_xy += x[i] *(*this)[i]; } LinearRegressionParameters<T> linear_regression_parameters; if(s_x == 0 && s_y == 0 && s_xx == 0 && s_yy == 0 && s_xy == 0) { linear_regression_parameters.intercept = 0.0; linear_regression_parameters.slope = 0.0; linear_regression_parameters.correlation = 1.0; } else { linear_regression_parameters.intercept = (s_y * s_xx - s_x * s_xy) /(n * s_xx - s_x * s_x); linear_regression_parameters.slope = (n * s_xy - s_x * s_y) /(n * s_xx - s_x * s_x); if(sqrt((n * s_xx - s_x * s_x) *(n * s_yy - s_y * s_y)) < 1.0e-12) { linear_regression_parameters.correlation = 1.0; } else { linear_regression_parameters.correlation = (n * s_xy - s_x * s_y) / sqrt((n * s_xx - s_x * s_x) *(n * s_yy - s_y * s_y)); } } return(linear_regression_parameters); } /// Returns a vector with the absolute values of the current vector. template <class T> Vector<T> Vector<T>::calculate_absolute_value() const { const size_t this_size = this->size(); Vector<T> absolute_value(this_size); for(size_t i = 0; i < this_size; i++) { if((*this)[i] > 0) { absolute_value[i] = (*this)[i]; } else { absolute_value[i] = -(*this)[i]; } } return(absolute_value); } /// Sets the elements of the vector to their absolute values. 
template <class T> void Vector<T>::apply_absolute_value() { const size_t this_size = this->size(); for(size_t i = 0; i < this_size; i++) { if((*this)[i] < 0) { (*this)[i] = -(*this)[i]; } } } /// Returns a vector with the bounded elements from below of the current vector. /// @param lower_bound Lower bound values. template <class T> Vector<T> Vector<T>::calculate_lower_bounded(const T &lower_bound) const { const size_t this_size = this->size(); Vector<T> bounded_vector(this_size); for(size_t i = 0; i < this_size; i++) { if((*this)[i] < lower_bound) { bounded_vector[i] = lower_bound; } else { bounded_vector[i] = (*this)[i]; } } return(bounded_vector); } /// Returns a vector with the bounded elements from above of the current vector. /// @param lower_bound Lower bound values. template <class T> Vector<T> Vector<T>::calculate_lower_bounded(const Vector<T> &lower_bound) const { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ const size_t lower_bound_size = lower_bound.size(); if(lower_bound_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "Vector<T> calculate_lower_bounded(const Vector<T>&) const method.\n" << "Lower bound size must be equal to vector size.\n"; throw logic_error(buffer.str()); } #endif Vector<T> bounded_vector(this_size); // Apply lower bound for(size_t i = 0; i < this_size; i++) { if((*this)[i] < lower_bound[i]) { bounded_vector[i] = lower_bound[i]; } else { bounded_vector[i] = (*this)[i]; } } return(bounded_vector); } /// This method bounds the elements of the vector if they fall above an upper /// bound value. /// @param upper_bound Upper bound value. 
template <class T>
Vector<T> Vector<T>::calculate_upper_bounded(const T &upper_bound) const
{
  const size_t this_size = this->size();

  Vector<T> bounded_vector(this_size);

  // Clamp every element from above by the scalar bound.

  for(size_t i = 0; i < this_size; i++) {
    bounded_vector[i] = ((*this)[i] > upper_bound) ? upper_bound : (*this)[i];
  }

  return(bounded_vector);
}

/// This method bounds the elements of the vector if they fall above their
/// corresponding upper bound values.
/// @param upper_bound Upper bound values.

template <class T>
Vector<T> Vector<T>::calculate_upper_bounded(const Vector<T> &upper_bound) const
{
  const size_t this_size = this->size();

  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  const size_t upper_bound_size = upper_bound.size();

  if(upper_bound_size != this_size) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "Vector<T> calculate_upper_bounded(const Vector<T>&) const method.\n"
           << "Upper bound size must be equal to vector size.\n";

    throw logic_error(buffer.str());
  }

#endif

  Vector<T> bounded_vector(this_size);

  // Apply upper bound

  for(size_t i = 0; i < this_size; i++) {
    bounded_vector[i] = ((*this)[i] > upper_bound[i]) ? upper_bound[i] : (*this)[i];
  }

  return(bounded_vector);
}

/// This method bounds the elements of the vector if they fall above or below
/// their lower or upper
/// bound values, respectively.
/// @param lower_bound Lower bound value.
/// @param upper_bound Upper bound value.
template <class T>
Vector<T> Vector<T>::calculate_lower_upper_bounded(const T &lower_bound,
                                                   const T &upper_bound) const
{
  const size_t this_size = this->size();

  Vector<T> bounded_vector(this_size);

  // Clamp every element into [lower_bound, upper_bound].

  for(size_t i = 0; i < this_size; i++) {
    if((*this)[i] < lower_bound) {
      bounded_vector[i] = lower_bound;
    } else if((*this)[i] > upper_bound) {
      bounded_vector[i] = upper_bound;
    } else {
      bounded_vector[i] = (*this)[i];
    }
  }

  return(bounded_vector);
}

/// This method bounds the elements of the vector if they fall above or below
/// their corresponding lower or upper
/// bound values, respectively.
/// @param lower_bound Lower bound values.
/// @param upper_bound Upper bound values.

template <class T>
Vector<T> Vector<T>::calculate_lower_upper_bounded(const Vector<T> &lower_bound,
                                                   const Vector<T> &upper_bound) const
{
  const size_t this_size = this->size();

  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  const size_t lower_bound_size = lower_bound.size();
  const size_t upper_bound_size = upper_bound.size();

  if(lower_bound_size != this_size || upper_bound_size != this_size) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "Vector<T> calculate_lower_upper_bounded(const Vector<T>&, const "
              "Vector<T>&) const method.\n"
           << "Lower and upper bound sizes must be equal to vector size.\n";

    throw logic_error(buffer.str());
  }

#endif

  Vector<T> bounded_vector(this_size);

  // Apply lower and upper bounds

  for(size_t i = 0; i < this_size; i++) {
    if((*this)[i] < lower_bound[i]) {
      bounded_vector[i] = lower_bound[i];
    } else if((*this)[i] > upper_bound[i]) {
      bounded_vector[i] = upper_bound[i];
    } else {
      bounded_vector[i] = (*this)[i];
    }
  }

  return(bounded_vector);
}

/// Sets the elements of the vector to a given value if they fall below that
/// value.
/// @param lower_bound Lower bound value.
template <class T>
void Vector<T>::apply_lower_bound(const T &lower_bound)
{
  const size_t this_size = this->size();

  // In-place clamp from below by a scalar bound.

  for(size_t i = 0; i < this_size; i++) {
    if((*this)[i] < lower_bound) {
      (*this)[i] = lower_bound;
    }
  }
}

/// Sets the elements of the vector to the corresponding lower bound values if
/// they fall below them.
/// @param lower_bound Lower bound values.

template <class T>
void Vector<T>::apply_lower_bound(const Vector<T> &lower_bound)
{
  const size_t this_size = this->size();

  for(size_t i = 0; i < this_size; i++) {
    if((*this)[i] < lower_bound[i]) {
      (*this)[i] = lower_bound[i];
    }
  }
}

/// Sets the elements of the vector to a given value if they fall above that
/// value.
/// @param upper_bound Upper bound value.

template <class T>
void Vector<T>::apply_upper_bound(const T &upper_bound)
{
  const size_t this_size = this->size();

  // In-place clamp from above by a scalar bound.

  for(size_t i = 0; i < this_size; i++) {
    if((*this)[i] > upper_bound) {
      (*this)[i] = upper_bound;
    }
  }
}

/// Sets the elements of the vector to the corresponding upper bound values if
/// they fall above them.
/// @param upper_bound Upper bound values.

template <class T>
void Vector<T>::apply_upper_bound(const Vector<T> &upper_bound)
{
  const size_t this_size = this->size();

  for(size_t i = 0; i < this_size; i++) {
    if((*this)[i] > upper_bound[i]) {
      (*this)[i] = upper_bound[i];
    }
  }
}

/// Sets the elements of the vector to a given lower bound value if they fall
/// below that value,
/// or to a given upper bound value if they fall above that value.
/// @param lower_bound Lower bound value.
/// @param upper_bound Upper bound value.
template <class T> void Vector<T>::apply_lower_upper_bounds(const T &lower_bound, const T &upper_bound) { const size_t this_size = this->size(); for(size_t i = 0; i < this_size; i++) { if((*this)[i] < lower_bound) { (*this)[i] = lower_bound; } else if((*this)[i] > upper_bound) { (*this)[i] = upper_bound; } } } /// Sets the elements of the vector to given lower bound values if they fall /// below that values, /// or to given upper bound values if they fall above that values. /// @param lower_bound Lower bound values. /// @param upper_bound Upper bound values. template <class T> void Vector<T>::apply_lower_upper_bounds(const Vector<T> &lower_bound, const Vector<T> &upper_bound) { const size_t this_size = this->size(); for(size_t i = 0; i < this_size; i++) { if((*this)[i] < lower_bound[i]) { (*this)[i] = lower_bound[i]; } else if((*this)[i] > upper_bound[i]) { (*this)[i] = upper_bound[i]; } } } /// Returns the vector of the indices of the vector sorted by less ranks. template <class T> Vector<size_t> Vector<T>::sort_ascending_indices() const { Vector<size_t> indices(this->size()); #ifdef __Cpp11__ const Vector<size_t> less_rank = this->calculate_less_rank(); for(size_t i = 0; i < this->size(); i++) { indices[less_rank[i]] = i; } #else indices.initialize_sequential(); sort(indices.begin(), indices.end(), [this](size_t i1, size_t i2) {return(*this)[i1] <(*this)[i2];}); #endif return(indices); } template <class T> Vector<T> Vector<T>::sort_ascending_values() const { Vector<T> sorted(*this); sort(sorted.begin(), sorted.end()); return sorted; } template <class T> Vector<size_t> Vector<T>::calculate_lower_indices(const size_t& indices_number) const { return sort_ascending_indices().get_subvector(0,indices_number-1); } template <class T> Vector<T> Vector<T>::calculate_lower_values(const size_t& indices_number) const { return sort_ascending_values().get_subvector(0,indices_number-1); } /// Returns the vector of the indices of the vector sorted by greater ranks. 
template <class T>
Vector<size_t> Vector<T>::sort_descending_indices() const
{
  Vector<size_t> indices(this->size());

#ifdef __Cpp11__

  const Vector<size_t> greater_rank = this->calculate_greater_rank();

  for(size_t i = 0; i < this->size(); i++) {
    indices[greater_rank[i]] = i;
  }

#else

  indices.initialize_sequential();

  sort(indices.begin(), indices.end(),
       [this](size_t i1, size_t i2) {return(*this)[i1] >(*this)[i2];});

#endif

  return(indices);
}

/// Returns a copy of this vector sorted in descending order.

template <class T>
Vector<T> Vector<T>::sort_descending_values() const
{
  // Sort ascending, then reverse.

  Vector<T> sorted(*this);

  sort(sorted.begin(), sorted.end());

  return sorted.get_reverse();
}

/// Returns a vector with the rank of the elements of this vector.
/// The smallest element will have rank 0, and the greatest element will have
/// size-1.
/// That is, small values correspond with small ranks.

template <class T>
Vector<size_t> Vector<T>::calculate_less_rank() const
{
  const size_t this_size = this->size();

  Vector<size_t> rank(this_size);

  Vector<T> sorted_vector(*this);

  sort(sorted_vector.begin(), sorted_vector.end(), less<double>());

  // previous_rank records the sorted positions already assigned, so that
  // duplicates receive distinct consecutive ranks.

  Vector<size_t> previous_rank;
  previous_rank.set(this_size, -1);

  for(size_t i = 0; i < this_size; i++) {
    for(size_t j = 0; j < this_size; j++) {
      if(previous_rank.contains(j)) {
        continue;
      }

      if((*this)[i] == sorted_vector[j]) {
        rank[i] = j;
        previous_rank[i] = j;

        break;
      }
    }
  }

  return(rank);
}

/// Returns a vector with the rank of the elements of this vector.
/// The smallest element will have rank 0, and the greatest element will have
/// size-1.
/// That is, small values correspond with small ranks.
/// Ties are assigned the mean of the ranks.
/// Returns a vector with the rank of the elements of this vector, where tied
/// elements are assigned the mean of the ranks they would otherwise occupy.

template <class T>
Vector<double> Vector<T>::calculate_less_rank_with_ties() const
{
  Vector<double> indices_this = this->calculate_less_rank().to_double_vector();

  const Vector<double> this_unique = this->get_unique_elements();
  const Vector<size_t> this_unique_frecuency = this->count_unique();

  const size_t n = this->size();

  for(int i = 0; i < static_cast<int>(this_unique.size()); i++) {
    if(this_unique_frecuency[static_cast<size_t>(i)] > 1) {
      const double unique = this_unique[static_cast<size_t>(i)];

      // Bug fix: the indices vector must start empty. Constructing it with
      // the tie frequency pre-filled it with that many zero elements, so the
      // mean below was computed over spurious zeros plus the real ranks.

      Vector<double> indices;

      for(size_t j = 0; j < n; j++) {
        if((*this)[j] == unique) {
          indices.push_back(indices_this[j]);
        }
      }

      const double mean_indice = indices.calculate_mean();

      for(size_t j = 0; j < n; j++) {
        if((*this)[j] == unique) {
          indices_this[j] = mean_indice;
        }
      }
    }
  }

  return indices_this;
}

/// Returns a vector with the rank of the elements of this vector.
/// The smallest element will have rank size-1, and the greatest element will
/// have 0.
/// That is, small values correspond to big ranks.

template <class T>
Vector<size_t> Vector<T>::calculate_greater_rank() const
{
  const size_t this_size = this->size();

  Vector<size_t> rank(this_size);

  Vector<T> sorted_vector(*this);

  sort(sorted_vector.begin(), sorted_vector.end(), greater<T>());

  // Sorted positions already claimed, so duplicates get distinct ranks.

  Vector<size_t> previous_rank;
  previous_rank.set(this_size, -1);

  for(size_t i = 0; i < this_size; i++) {
    for(size_t j = 0; j < this_size; j++) {
      if(previous_rank.contains(j)) {
        continue;
      }

      if((*this)[i] == sorted_vector[j]) {
        rank[i] = j;
        previous_rank[i] = j;

        break;
      }
    }
  }

  return(rank);
}

// @todo

/// Returns the indices that would sort this vector in descending order.

template <class T>
Vector<size_t> Vector<T>::calculate_greater_indices() const
{
  const size_t this_size = this->size();

  Vector<size_t> y(this_size);

  size_t n(0);

  generate(y.begin(), y.end(), [&]{ return n++; });

  sort(y.begin(), y.end(),
       [&](size_t i1, size_t i2) { return(*this)[i1] > (*this)[i2]; } );

  return(y);
}

/// Returns a vector sorted according to a given rank.
/// @param rank Given rank. template <class T> Vector<T> Vector<T>::sort_rank(const Vector<size_t>& rank) const { const size_t this_size = this->size(); #ifdef __OPENNN_DEBUG__ const size_t rank_size = rank.size(); if(this_size != rank_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "Vector<T> sort_rank(const Vector<size_t>&) const.\n" << "Sizes of vectors are " << this_size << " and " << rank_size << " and they must be the same.\n"; throw logic_error(buffer.str()); } #endif Vector<T> sorted_vector(this_size); for(size_t i = 0; i < this_size; i++) { sorted_vector[i] = (*this)[rank[i]]; } return sorted_vector; } template <class T> inline Vector<T> Vector<T>::operator= (const initializer_list<T>& list) const { return Vector<T>(list); } /// Sum vector+scalar arithmetic operator. /// @param scalar Scalar value to be added to this vector. template <class T> inline Vector<T> Vector<T>::operator+ (const T &scalar) const { const size_t this_size = this->size(); Vector<T> sum(this_size); transform(this->begin(), this->end(), sum.begin(), bind2nd(plus<T>(), scalar)); return(sum); } /// Sum vector+vector arithmetic operator. /// @param other_vector Vector to be added to this vector. template <class T> inline Vector<T> Vector<T>::operator+ (const Vector<T> &other_vector) const { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ const size_t other_size = other_vector.size(); if(other_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "Vector<T> operator + (const Vector<T>) const.\n" << "Sizes of vectors are " << this_size << " and " << other_size << " and they must be the same.\n"; throw logic_error(buffer.str()); } #endif Vector<T> sum(this_size); transform(this->begin(), this->end(), other_vector.begin(), sum.begin(), plus<T>()); return(sum); } /// Difference vector-scalar arithmetic operator. /// @param scalar Scalar value to be subtracted to this vector. 
template <class T>
inline Vector<T> Vector<T>::operator-(const T &scalar) const {
  const size_t this_size = this->size();

  Vector<T> difference(this_size);

  // A lambda replaces bind2nd, which is deprecated in C++11 and removed in
  // C++17.
  transform(this->begin(), this->end(), difference.begin(),
            [&scalar](const T& value) { return value - scalar; });

  return(difference);
}


/// Difference vector-vector arithmetic operator.
/// @param other_vector vector to be subtracted to this vector.

template <class T>
inline Vector<T> Vector<T>::operator-(const Vector<T> &other_vector) const {
  const size_t this_size = this->size();

  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  const size_t other_size = other_vector.size();

  if(other_size != this_size) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "Vector<T> operator -(const Vector<T>&) const.\n"
           << "Sizes of vectors are " << this_size << " and " << other_size
           << " and they must be the same.\n";

    throw logic_error(buffer.str());
  }

#endif

  Vector<T> difference(this_size);

  transform(this->begin(), this->end(), other_vector.begin(),
            difference.begin(), minus<T>());

  return(difference);
}


/// Product vector*scalar arithmetic operator.
/// @param scalar Scalar value to be multiplied to this vector.

template <class T> Vector<T> Vector<T>::operator*(const T &scalar) const {
  const size_t this_size = this->size();

  Vector<T> product(this_size);

  // A lambda replaces bind2nd, which is deprecated in C++11 and removed in
  // C++17.
  transform(this->begin(), this->end(), product.begin(),
            [&scalar](const T& value) { return value * scalar; });

  return(product);
}


/// Element by element product vector*vector arithmetic operator.
/// @param other_vector vector to be multiplied to this vector.
template <class T>
inline Vector<T> Vector<T>::operator*(const Vector<T> &other_vector) const {
  const size_t this_size = this->size();

  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  const size_t other_size = other_vector.size();

  if(other_size != this_size) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "Vector<T> operator *(const Vector<T>&) const.\n"
           << "Size of other vector(" << other_size
           << ") must be equal to size of this vector(" << this_size
           << ").\n";

    throw logic_error(buffer.str());
  }

#endif

  // Element-wise (Hadamard) product; this is NOT a dot product.
  Vector<T> product(this_size);

  transform(this->begin(), this->end(), other_vector.begin(),
            product.begin(), multiplies<T>());

  return(product);
}


/// Element by row product vector*matrix arithmetic operator.
/// @param matrix matrix to be multiplied to this vector.

template <class T>
Matrix<T> Vector<T>::operator*(const Matrix<T> &matrix) const {
  const size_t rows_number = matrix.get_rows_number();
  const size_t columns_number = matrix.get_columns_number();

  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  const size_t this_size = this->size();

  if(rows_number != this_size) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "Vector<T> operator *(const Matrix<T>&) const.\n"
           << "Number of matrix rows(" << rows_number
           << ") must be equal to vector size(" << this_size << ").\n";

    throw logic_error(buffer.str());
  }

#endif

  Matrix<T> product(rows_number, columns_number);

  // NOTE(review): this is not a matrix product — row i of the matrix is
  // scaled element-wise by element i of this vector. For a true
  // vector-matrix product see dot(const Matrix<T>&).
  for(size_t i = 0; i < rows_number; i++) {
    for(size_t j = 0; j < columns_number; j++) {
      product(i, j) = (*this)[i] * matrix(i, j);
    }
  }

  return(product);
}


/// Returns the dot product of this vector with a matrix.
/// The number of rows of the matrix must be equal to the size of the vector.
/// @param matrix matrix to be multiplied to this vector.
template <class T>
Vector<double> Vector<T>::dot(const Matrix<T> &matrix) const {
  const size_t rows_number = matrix.get_rows_number();
  const size_t columns_number = matrix.get_columns_number();
  const size_t this_size = this->size();

  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  if(rows_number != this_size) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "Vector<T> dot(const Matrix<T>&) const method.\n"
           << "Matrix number of rows (" << rows_number
           << ") must be equal to vector size (" << this_size << ").\n";

    throw logic_error(buffer.str());
  }

#endif

  // Row-vector times matrix: product[j] = sum_i this[i] * matrix(i, j).
  // The result has one entry per matrix column.
  Vector<double> product(columns_number, 0.0);

  for(size_t j = 0; j < columns_number; j++) {
    for(size_t i = 0; i < rows_number; i++) {
      product[j] += (*this)[i]*matrix(i,j);
    }
  }

  // Previous Eigen-based implementation, kept for reference:
  //  const Eigen::Map<Eigen::VectorXd> vector_eigen((double *)this->data(), this_size);
  //  const Eigen::Map<Eigen::MatrixXd> matrix_eigen((double *)matrix.data(), rows_number, columns_number);
  //  Eigen::Map<Eigen::VectorXd> product_eigen(product.data(), columns_number);
  //  product_eigen = vector_eigen.transpose() * matrix_eigen;

  return(product);
}


/// Dot product vector*vector arithmetic operator.
/// @param other_vector vector to be multiplied to this vector.
template <class T> inline double Vector<T>::dot(const Vector<double> &other_vector) const { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ const size_t other_size = other_vector.size(); if(other_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "Type dot(const Vector<T>&) const method.\n" << "Both vector sizes must be the same.\n"; throw logic_error(buffer.str()); } #endif // const Eigen::Map<Eigen::VectorXd> // this_vector_eigen((double*)this->data(), this_size); // const Eigen::Map<Eigen::VectorXd> // other_vector_eigen((double*)other_vector.data(), this_size); // return(this_vector_eigen.dot(other_vector_eigen)); double dot_product = 0.0; for(size_t i = 0; i < this_size; i++) { dot_product += (*this)[i] * other_vector[i]; } return(dot_product); } /// Outer product vector*vector arithmetic operator. /// @param other_vector vector to be multiplied to this vector. template <class T> Matrix<T> Vector<T>::direct(const Vector<T> &other_vector) const { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ const size_t other_size = other_vector.size(); if(other_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "Matrix<T> direct(const Vector<T>&) const method.\n" << "Both vector sizes must be the same.\n"; throw logic_error(buffer.str()); } #endif Matrix<T> direct(this_size, this_size); #pragma omp parallel for if(this_size > 1000) for(int i = 0; i < static_cast<int>(this_size); i++) { for(size_t j = 0; j < this_size; j++) { direct(i, j) = (*this)[i] * other_vector[j]; } } return(direct); } /// Cocient vector/scalar arithmetic operator. /// @param scalar Scalar value to be divided to this vector. 
template <class T> Vector<T> Vector<T>::operator/(const T &scalar) const { const size_t this_size = this->size(); Vector<T> cocient(this_size); transform(this->begin(), this->end(), cocient.begin(), bind2nd(divides<T>(), scalar)); return(cocient); } /// Cocient vector/vector arithmetic operator. /// @param other_vector vector to be divided to this vector. template <class T> Vector<T> Vector<T>::operator/(const Vector<T> &other_vector) const { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ const size_t other_size = other_vector.size(); if(other_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "Vector<T> operator /(const Vector<T>&) const.\n" << "Both vector sizes must be the same.\n"; throw logic_error(buffer.str()); } #endif Vector<T> cocient(this_size); transform(this->begin(), this->end(), other_vector.begin(), cocient.begin(), divides<T>()); return(cocient); } /// Scalar sum and assignment operator. /// @param value Scalar value to be added to this vector. template <class T> void Vector<T>::operator+= (const T &value) { const size_t this_size = this->size(); for(size_t i = 0; i < this_size; i++) { (*this)[i] = (*this)[i] + value; } } /// Vector sum and assignment operator. /// @param other_vector Vector to be added to this vector. template <class T> void Vector<T>::operator+= (const Vector<T> &other_vector) { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ const size_t other_size = other_vector.size(); if(other_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "void operator += (const Vector<T>&).\n" << "Both vector sizes must be the same.\n"; throw logic_error(buffer.str()); } #endif for(size_t i = 0; i < this_size; i++) { (*this)[i] = (*this)[i] + other_vector[i]; } } /// Scalar rest and assignment operator. /// @param value Scalar value to be subtracted to this vector. 
template <class T> void Vector<T>::operator-= (const T &value) {
  // Subtract the scalar from every element in place.
  transform(this->begin(), this->end(), this->begin(),
            [&value](const T& element) { return element - value; });
}


/// Vector rest and assignment operator.
/// @param other_vector Vector to be subtracted to this vector.

template <class T> void Vector<T>::operator-= (const Vector<T> &other_vector) {
  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  const size_t this_size = this->size();
  const size_t other_size = other_vector.size();

  if(other_size != this_size) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "void operator -= (const Vector<T>&).\n"
           << "Both vector sizes must be the same.\n";

    throw logic_error(buffer.str());
  }

#endif

  // Element-wise in-place subtraction.
  transform(this->begin(), this->end(), other_vector.begin(),
            this->begin(), minus<T>());
}


/// Scalar product and assignment operator.
/// @param value Scalar value to be multiplied to this vector.

template <class T> void Vector<T>::operator*= (const T &value) {
  // Multiply every element by the scalar in place.
  transform(this->begin(), this->end(), this->begin(),
            [&value](const T& element) { return element * value; });
}


/// Vector product and assignment operator.
/// @param other_vector Vector to be multiplied to this vector.

template <class T> void Vector<T>::operator*= (const Vector<T> &other_vector) {
  // Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  const size_t this_size = this->size();
  const size_t other_size = other_vector.size();

  if(other_size != this_size) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "void operator *= (const Vector<T>&).\n"
           << "Both vector sizes must be the same.\n";

    throw logic_error(buffer.str());
  }

#endif

  // Element-wise in-place product.
  transform(this->begin(), this->end(), other_vector.begin(),
            this->begin(), multiplies<T>());
}


/// Scalar division and assignment operator.
/// @param value Scalar value to be divided to this vector.
template <class T> void Vector<T>::operator/= (const T &value) { const size_t this_size = this->size(); for(size_t i = 0; i < this_size; i++) { (*this)[i] = (*this)[i] / value; } } /// Vector division and assignment operator. /// @param other_vector Vector to be divided to this vector. template <class T> void Vector<T>::operator/= (const Vector<T> &other_vector) { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ const size_t other_size = other_vector.size(); if(other_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "void operator /= (const Vector<T>&).\n" << "Both vector sizes must be the same.\n"; throw logic_error(buffer.str()); } #endif for(size_t i = 0; i < this_size; i++) { (*this)[i] = (*this)[i] / other_vector[i]; } } /// Sets all the negative elements in the vector to zero. template <class T> Vector<T> Vector<T>::filter_positive() const { Vector<T> new_vector(*this); for(size_t i = 0; i < this->size(); i++) { if(new_vector[i] < 0) { new_vector[i] = 0; } } return new_vector; } /// Sets all the positive elements in the vector to zero. 
template <class T> Vector<T> Vector<T>::filter_negative() const {
  Vector<T> new_vector(*this);

  for(size_t i = 0; i < this->size(); i++) {
    if(new_vector[i] > 0) {
      new_vector[i] = 0;
    }
  }

  // Bug fix: the filtered copy was built but never returned, which is
  // undefined behavior in a function with a non-void return type.
  return new_vector;
}


/// Returns the number of timestamp elements that fall between two given
/// dates (inclusive). Elements are assumed to be comparable to time_t
/// values — TODO confirm against callers.

template <class T>
size_t Vector<T>::count_dates(const size_t& start_day, const size_t& start_month, const size_t& start_year,
                              const size_t& end_day, const size_t& end_month, const size_t& end_year) const {
  // Bug fix: struct tm must be zero-initialized; mktime reads every field
  // (tm_sec, tm_min, tm_hour, tm_isdst, ...), and reading them
  // uninitialized is undefined behavior and made results nondeterministic.
  struct tm start = {};
  start.tm_mday = static_cast<int>(start_day);
  start.tm_mon = static_cast<int>(start_month) - 1;
  start.tm_year = static_cast<int>(start_year) - 1900;

  // The extra day (3600*24 seconds) appears intended to make the range
  // boundaries inclusive — TODO confirm against callers.
  const time_t start_date = mktime(&start) + 3600*24;

  struct tm end = {};
  end.tm_mday = static_cast<int>(end_day);
  end.tm_mon = static_cast<int>(end_month) - 1;
  end.tm_year = static_cast<int>(end_year) - 1900;

  const time_t end_date = mktime(&end) + 3600*24;

  size_t count = 0;

  for(size_t i = 0; i < this->size(); i++) {
    if((*this)[i] >= start_date &&(*this)[i] <= end_date) {
      count++;
    }
  }

  return(count);
}


/// Returns the indices of the timestamp elements which fall between two given dates.
/// @param start_day Start day.
/// @param start_month Start month.
/// @param start_year Start year.
/// @param end_day End day.
/// @param end_month End month.
/// @param end_year End year.
template <class T>
Vector<size_t> Vector<T>::filter_dates(const size_t& start_day, const size_t& start_month, const size_t& start_year,
                                       const size_t& end_day, const size_t& end_month, const size_t& end_year) const {
  // Pre-size the result using the matching count so no reallocation occurs.
  const size_t new_size = count_dates(start_day, start_month, start_year,
                                      end_day, end_month, end_year);

  Vector<size_t> indices(new_size);

  // Bug fix: struct tm must be zero-initialized; mktime reads every field
  // (tm_sec, tm_min, tm_hour, tm_isdst, ...), and reading them
  // uninitialized is undefined behavior. The same dates as in count_dates
  // must be produced here or the pre-sized result will not match.
  struct tm start = {};
  start.tm_mday = static_cast<int>(start_day);
  start.tm_mon = static_cast<int>(start_month) - 1;
  start.tm_year = static_cast<int>(start_year) - 1900;

  const time_t start_date = mktime(&start) + 3600*24;

  struct tm end = {};
  end.tm_mday = static_cast<int>(end_day);
  end.tm_mon = static_cast<int>(end_month) - 1;
  end.tm_year = static_cast<int>(end_year) - 1900;

  const time_t end_date = mktime(&end) + 3600*24;

  size_t index = 0;

  for(size_t i = 0; i < this->size(); i++) {
    if((*this)[i] >= start_date && (*this)[i] <= end_date) {
      indices[index] = i;
      index++;
    }
  }

  return(indices);
}


/// Calculates the outliers in the vector using the Tukey's algorithm,
/// and returns the indices of that elements.
/// @param cleaning_parameter Cleaning parameter in the Tukey's method. The default value is 1.5.
template <class T>
Vector<size_t> Vector<T>::calculate_Tukey_outliers(const double& cleaning_parameter) const {
  Vector<size_t> outliers_indices;

  // Binary variables have no meaningful quartiles; report no outliers.
  if(this->is_binary()) {
    return outliers_indices;
  }

  const size_t this_size = this->size();

  const Vector<double> box_plot = calculate_box_plot();

  // Degenerate distribution (Q1 == Q3): no spread, so no outliers.
  if(fabs(box_plot[3] - box_plot[1]) < numeric_limits<double>::epsilon()) {
    return outliers_indices;
  }

  // Bug fix: use fabs instead of abs — with only the C library in scope,
  // abs(double) binds to int abs(int) and truncates the interquartile
  // range. fabs is already used in the check above.
  const double interquartile_range = fabs(box_plot[3] - box_plot[1]);

  // Tukey's fences: flag elements outside
  // [Q1 - k*IQR, Q3 + k*IQR], with k = cleaning_parameter.
  for(size_t i = 0; i < this_size; i++) {
    if((*this)[i] <(box_plot[1] - cleaning_parameter*interquartile_range) ||
       (*this)[i] >(box_plot[3] + cleaning_parameter*interquartile_range)) {
      outliers_indices.push_back(i);
    }
  }

  return(outliers_indices);
}


/// Repeatedly removes Tukey outliers until a pass finds none, and returns
/// the indices (into the original vector) of every removed element.

template <class T>
Vector<size_t> Vector<T>::calculate_Tukey_outliers_iterative(const double& cleaning_parameter) const {
  // Column 0 carries the original 1-based positions so they survive row
  // deletion; column 1 carries the values.
  const Vector<T> id(1,1,this->size());

  Matrix<T> data(this->size(),2);
  data.set_column(0,id);
  data.set_column(1,(*this));

  Vector<T> id_to_remove;

  for(;;) {
    const Vector<T> iteration_id = data.get_column(0);

    const Vector<size_t> iteration_indices =
        data.get_column(1).calculate_Tukey_outliers(cleaning_parameter);

    if(iteration_indices.empty()) break;

    data = data.delete_rows(iteration_indices);

    id_to_remove = id_to_remove.assemble(iteration_id.get_subvector(iteration_indices));
  }

  // Map the surviving ids back to indices in the original vector.
  const Vector<size_t> tukey_indices =
      id.calculate_equal_to_indices(id_to_remove).to_size_t_vector();

  return tukey_indices;
}


/// Returns the indices of the elements that fall in histogram bins whose
/// frequency does not exceed the given minimum.

template <class T>
Vector<size_t> Vector<T>::calculate_histogram_outliers(const size_t& bins_number, const size_t& minimum_frequency) const {
  Vector<size_t> indices;

  if(this->is_binary()) return indices;

  const Histogram<T> histogram = calculate_histogram(bins_number);

  for(size_t i = 0; i < bins_number; i++) {
    if(histogram.frequencies[i] <= minimum_frequency) {
      const Vector<size_t> bin_indices =
          calculate_between_indices(histogram.minimums[i], histogram.maximums[i]);

      indices = indices.assemble(bin_indices);
    }
  }

  return(indices);
}
// Repeatedly removes histogram outliers until a pass finds none, and
// returns the indices (into the original vector) of every removed element.
template <class T>
Vector<size_t> Vector<T>::calculate_histogram_outliers_iterative(const size_t& bins_number, const size_t& minimum_frequency) const {
  // Column 0 carries the original 1-based positions so they survive row
  // deletion; column 1 carries the values.
  const Vector<T> id(1,1,this->size());

  Matrix<T> data(this->size(),2);
  data.set_column(0,id);
  data.set_column(1,(*this));

  Vector<T> id_to_remove;

  for(;;) {
    const Vector<T> iteration_id = data.get_column(0);

    const Vector<size_t> iteration_indices =
        data.get_column(1).calculate_histogram_outliers(bins_number, minimum_frequency);

    if(iteration_indices.empty()) break;

    data = data.delete_rows(iteration_indices);

    id_to_remove = id_to_remove.assemble(iteration_id.get_subvector(iteration_indices));
  }

  // Map the surviving ids back to indices in the original vector.
  const Vector<size_t> histogram_indices =
      id.calculate_equal_to_indices(id_to_remove).to_size_t_vector();

  return histogram_indices;
}


// Percentage-based variants, disabled; kept for reference.
/*
template <class T>
Vector<size_t> Vector<T>::calculate_histogram_outliers(const size_t& bins_number, const double& minimum_percentage) const {
  Vector<size_t> indices;

  if(this->is_binary()) return indices;

  const Histogram<T> histogram = calculate_histogram(bins_number);

  const size_t total = this->size();

  Vector<double> percentages(bins_number);

  for(size_t i = 0; i < bins_number; i++) {
    percentages[i] = static_cast<double>(histogram.frequencies[i] * 100.0) /static_cast<double>(total);
  }

  for(size_t i = 0; i < bins_number; i++) {
    if(percentages[i] <= minimum_percentage) {
      const Vector<size_t> bin_indices = calculate_between_indices(histogram.minimums[i], histogram.maximums[i]);

      indices = indices.assemble(bin_indices);
    }
  }

  return(indices);
}

template <class T>
Vector<size_t> Vector<T>::calculate_histogram_outliers_iterative(const size_t& bins_number, const double& minimum_percentage) const {
  const Vector<T> id(1,1,this->size());

  Matrix<T> data(this->size(),2);

  cout << "1" << endl;

  data.set_column(0,id);
  data.set_column(1,(*this));

  Vector<T> id_to_remove;

  cout << "2" << endl;

  for(;;) {
    const Vector<T> iteration_id = data.get_column(0);

    const Vector<size_t> iteration_indices =
        data.get_column(1).calculate_histogram_outliers(bins_number, minimum_percentage);

    if(iteration_indices.empty()) break;

    cout << data.get_rows_number() << " " << iteration_indices.size() << endl;

    data = data.delete_rows(iteration_indices);

    id_to_remove = id_to_remove.assemble(iteration_id.get_subvector(iteration_indices));
  }

  cout << "3" << endl;

  const Vector<size_t> histogram_indices = id.calculate_equal_to_indices(id_to_remove).to_size_t_vector();

  return histogram_indices;
}
*/


// Scales the elements to [-1, 1] using this vector's own minimum and
// maximum.
// NOTE(review): unlike calculate_scaled_minimum_maximum_0_1 below, this
// overload has no guard against maximum == minimum, which would divide by
// zero — confirm whether callers guarantee a non-degenerate range.
template <class T>
Vector<T> Vector<T>::calculate_scaled_minimum_maximum() const {
  const double minimum = calculate_minimum();
  const double maximum = calculate_maximum();

  const size_t this_size = this->size();

  Vector<T> new_vector(this_size);

  for(size_t i = 0; i < this_size; i++) {
    new_vector[i] = 2.0 *((*this)[i] - minimum) /(maximum - minimum) - 1.0;
  }

  return new_vector;
}


// Scales the elements to [0, 1] using this vector's own minimum and
// maximum; returns the vector unchanged if the range is degenerate.
template <class T>
Vector<T> Vector<T>::calculate_scaled_minimum_maximum_0_1() const {
  const double minimum = calculate_minimum();
  const double maximum = calculate_maximum();

  // Degenerate range: nothing to scale.
  if(maximum-minimum < numeric_limits<double>::min()) {
    return (*this);
  }

  const size_t this_size = this->size();

  Vector<T> normalized(this_size);

  for(size_t i = 0; i < this_size; i++) {
    normalized[i] = ((*this)[i] - minimum)/(maximum - minimum);
  }

  return(normalized);
}


/// Normalizes the elements of this vector using the minimum and maximum method.
/// @param minimum Minimum value for the scaling.
/// @param maximum Maximum value for the scaling.

template <class T>
void Vector<T>::scale_minimum_maximum(const T &minimum, const T &maximum) {
  // Degenerate range: leave the data untouched.
  if(maximum - minimum < numeric_limits<double>::min()) {
    return;
  }

  const size_t this_size = this->size();

  for(size_t i = 0; i < this_size; i++) {
    (*this)[i] = 2.0 *((*this)[i] - minimum) /(maximum - minimum) - 1.0;
  }
}


/// Normalizes the elements of this vector using the minimum and maximum method.
/// @param statistics Statistics structure, which contains the minimum and
/// maximum values for the scaling.
template <class T> void Vector<T>::scale_minimum_maximum(const Statistics<T> &statistics) { scale_minimum_maximum(statistics.minimum, statistics.maximum); } /// Normalizes the elements of the vector with the minimum and maximum method. /// The minimum and maximum values used are those calculated from the vector. /// It also returns the statistics from the vector. template <class T> Statistics<T> Vector<T>::scale_minimum_maximum() { const Statistics<T> statistics = calculate_statistics(); scale_minimum_maximum(statistics); return(statistics); } /// Normalizes the elements of this vector using the mean and standard deviation /// method. /// @param mean Mean value for the scaling. /// @param standard_deviation Standard deviation value for the scaling. template <class T> void Vector<T>::scale_mean_standard_deviation(const T &mean, const T &standard_deviation) { if(standard_deviation < numeric_limits<double>::min()) { return; } const size_t this_size = this->size(); for(size_t i = 0; i < this_size; i++) { (*this)[i] = ((*this)[i] - mean) / standard_deviation; } } /// Normalizes the elements of this vector using the mean and standard deviation /// method. /// @param statistics Statistics structure, /// which contains the mean and standard deviation values for the scaling. template <class T> void Vector<T>::scale_mean_standard_deviation(const Statistics<T> &statistics) { scale_mean_standard_deviation(statistics.mean, statistics.standard_deviation); } /// Normalizes the elements of the vector with the mean and standard deviation /// method. /// The values used are those calculated from the vector. /// It also returns the statistics from the vector. template <class T> Statistics<T> Vector<T>::scale_mean_standard_deviation() { const Statistics<T> statistics = calculate_statistics(); scale_mean_standard_deviation(statistics); return(statistics); } /// Normalizes the elements of this vector using standard deviationmethod. 
/// @param standard_deviation Standard deviation value for the scaling. template <class T> void Vector<T>::scale_standard_deviation(const T &standard_deviation) { if(standard_deviation < numeric_limits<double>::min()) { return; } const size_t this_size = this->size(); for(size_t i = 0; i < this_size; i++) { (*this)[i] = ((*this)[i]) / standard_deviation; } } /// Normalizes the elements of this vector using standard deviation method. /// @param statistics Statistics structure, /// which contains standard deviation value for the scaling. template <class T> void Vector<T>::scale_standard_deviation(const Statistics<T> &statistics) { scale_standard_deviation(statistics.standard_deviation); } /// Normalizes the elements of the vector with the standard deviation method. /// The values used are those calculated from the vector. /// It also returns the statistics from the vector. template <class T> Statistics<T> Vector<T>::scale_standard_deviation() { const Statistics<T> statistics = calculate_statistics(); scale_standard_deviation(statistics); return(statistics); } /// Scales the vector elements with given standard deviation values. /// It updates the data in the vector. /// The size of the standard deviation vector must be equal to the /// size of the vector. /// @param standard_deviation Standard deviation values. template <class T> void Vector<T>::scale_standard_deviation(const Vector<T> &standard_deviation) { const size_t this_size = this->size(); #ifdef __OPENNN_DEBUG__ const size_t standard_deviation_size = standard_deviation.size(); if(standard_deviation_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector template." 
<< "void scale_standard_deviation(const Vector<T>&, const " "Vector<T>&) method.\n" << "Size of standard deviation vector must be equal to size.\n"; throw logic_error(buffer.str()); } #endif // Rescale data #pragma omp parallel for for(int i = 0; i < this_size; i++) { if(standard_deviation[i] < numeric_limits<double>::min()) { // cout << "OpenNN Warning: Vector class.\n" // << "void scale_mean_standard_deviation(const Vector<T>&, const " // "Vector<T>&) method.\n" // << "Standard deviation of variable " << i << " is zero.\n" // << "Those elements won't be scaled.\n"; // Do nothing } else { (*this)[i] = ((*this)[i]) / standard_deviation[i]; } } } /// Returns a vector with the scaled elements of this vector acording to the /// minimum and maximum method. /// The size of the minimum and maximum vectors must be equal to the size of the /// vector. /// @param minimum Minimum values. /// @param maximum Maximum values. template <class T> Vector<T> Vector<T>::calculate_scaled_minimum_maximum(const Vector<T> &minimum, const Vector<T> &maximum) const { const size_t this_size = this->size(); #ifdef __OPENNN_DEBUG__ const size_t minimum_size = minimum.size(); if(minimum_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector template." << "Vector<T> calculate_scaled_minimum_maximum(const Vector<T>&, " "const Vector<T>&) const method.\n" << "Size of minimum vector must be equal to size.\n"; throw logic_error(buffer.str()); } const size_t maximum_size = maximum.size(); if(maximum_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector template." 
<< "Vector<T> calculate_scaled_minimum_maximum(const Vector<T>&, " "const Vector<T>&) const method.\n" << "Size of maximum vector must be equal to size.\n"; throw logic_error(buffer.str()); } #endif Vector<T> scaled_minimum_maximum(this_size); // Rescale data for(size_t i = 0; i < this_size; i++) { if(maximum[i] - minimum[i] < numeric_limits<double>::min()) { cout << "OpenNN Warning: Vector class.\n" << "Vector<T> calculate_scaled_minimum_maximum(const " "Vector<T>&, const Vector<T>&) const method.\n" << "Minimum and maximum values of variable " << i << " are equal.\n" << "Those elements won't be scaled.\n"; scaled_minimum_maximum[i] = (*this)[i]; } else { scaled_minimum_maximum[i] = 2.0 *((*this)[i] - minimum[i]) /(maximum[i] - minimum[i]) - 1.0; } } return(scaled_minimum_maximum); } template <class T> Vector<T> Vector<T>::calculate_scaled_mean_standard_deviation() const { const double mean = calculate_mean(); const double standard_deviation = calculate_standard_deviation(); if(standard_deviation < numeric_limits<double>::min()) { return (*this); } const size_t this_size = this->size(); Vector<T> normalized(this_size); for(size_t i = 0; i < this_size; i++) { normalized[i] = ((*this)[i] - mean)/standard_deviation; } return(normalized); } /// Returns a vector with the scaled elements of this vector acording to the /// mean and standard deviation method. /// The size of the mean and standard deviation vectors must be equal to the /// size of the vector. /// @param mean Mean values. /// @param standard_deviation Standard deviation values. template <class T> Vector<T> Vector<T>::calculate_scaled_mean_standard_deviation( const Vector<T> &mean, const Vector<T> &standard_deviation) const { const size_t this_size = this->size(); #ifdef __OPENNN_DEBUG__ ostringstream buffer; const size_t mean_size = mean.size(); if(mean_size != this_size) { buffer << "OpenNN Exception: Vector template." 
<< "Vector<T> calculate_scaled_mean_standard_deviation(const " "Vector<T>&, const Vector<T>&) const method.\n" << "Size of mean vector must be equal to size.\n"; throw logic_error(buffer.str()); } const size_t standard_deviation_size = standard_deviation.size(); if(standard_deviation_size != this_size) { buffer << "OpenNN Exception: Vector template.\n" << "Vector<T> calculate_scaled_mean_standard_deviation(const " "Vector<T>&, const Vector<T>&) const method.\n" << "Size of standard deviation vector must be equal to size.\n"; throw logic_error(buffer.str()); } #endif Vector<T> scaled_mean_standard_deviation(this_size); for(size_t i = 0; i < this_size; i++) { if(standard_deviation[i] < numeric_limits<double>::min()) { cout << "OpenNN Warning: Vector template.\n" << "Vector<T> calculate_scaled_mean_standard_deviation(const " "Vector<T>&, const Vector<T>&) const method.\n" << "Standard deviation of variable " << i << " is zero.\n" << "Those elements won't be scaled.\n"; scaled_mean_standard_deviation = (*this)[i]; } else { scaled_mean_standard_deviation[i] = (*this)[i] * standard_deviation[i] + mean[i]; } } return(scaled_mean_standard_deviation); } /// Returns a vector with the scaled elements of this vector acording to the /// standard deviation method. /// The size of the standard deviation vector must be equal to the /// size of the vector. /// @param standard_deviation Standard deviation values. 
template <class T> Vector<T> Vector<T>::calculate_scaled_standard_deviation(const Vector<T> &standard_deviation) const { const size_t this_size = this->size(); #ifdef __OPENNN_DEBUG__ const size_t standard_deviation_size = standard_deviation.size(); if(standard_deviation_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector template.\n" << "Vector<T> calculate_scaled_mean_standard_deviation(const " "Vector<T>&, const Vector<T>&) const method.\n" << "Size of standard deviation vector must be equal to size.\n"; throw logic_error(buffer.str()); } #endif Vector<T> scaled_standard_deviation(this_size); for(size_t i = 0; i < this_size; i++) { if(standard_deviation[i] < numeric_limits<double>::min()) { cout << "OpenNN Warning: Vector template.\n" << "Vector<T> calculate_scaled_mean_standard_deviation(const " "Vector<T>&, const Vector<T>&) const method.\n" << "Standard deviation of variable " << i << " is zero.\n" << "Those elements won't be scaled.\n"; scaled_standard_deviation = (*this)[i]; } else { scaled_standard_deviation[i] = (*this)[i] * standard_deviation[i]; } } return(scaled_standard_deviation); } /// Returns a vector with the unscaled elements of this vector acording to the /// minimum and maximum method. /// The size of the minimum and maximum vectors must be equal to the size of the /// vector. /// @param minimum Minimum values. /// @param maximum Maximum values. template <class T> Vector<T> Vector<T>::calculate_unscaled_minimum_maximum(const Vector<T> &minimum, const Vector<T> &maximum) const { const size_t this_size = this->size(); #ifdef __OPENNN_DEBUG__ const size_t minimum_size = minimum.size(); if(minimum_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector template." 
<< "Vector<T> calculate_unscaled_minimum_maximum(const Vector<T>&, " "const Vector<T>&) const method.\n" << "Size of minimum vector must be equal to size.\n"; throw logic_error(buffer.str()); } const size_t maximum_size = maximum.size(); if(maximum_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector template." << "Vector<T> calculate_unscaled_minimum_maximum(const Vector<T>&, " "const Vector<T>&) const method.\n" << "Size of maximum vector must be equal to size.\n"; throw logic_error(buffer.str()); } #endif Vector<T> unscaled_minimum_maximum(this_size); for(size_t i = 0; i < this_size; i++) { if(maximum[i] - minimum[i] < numeric_limits<double>::min()) { cout << "OpenNN Warning: Vector template.\n" << "Vector<T> calculate_unscaled_minimum_maximum(const " "Vector<T>&, const Vector<T>&) const method.\n" << "Minimum and maximum values of variable " << i << " are equal.\n" << "Those elements won't be unscaled.\n"; unscaled_minimum_maximum[i] = (*this)[i]; } else { unscaled_minimum_maximum[i] = 0.5 *((*this)[i] + 1.0) *(maximum[i] - minimum[i]) + minimum[i]; } } return(unscaled_minimum_maximum); } /// Returns a vector with the unscaled elements of this vector acording to the /// mean and standard deviation method. /// The size of the mean and standard deviation vectors must be equal to the /// size of the vector. /// @param mean Mean values. /// @param standard_deviation Standard deviation values. template <class T> Vector<T> Vector<T>::calculate_unscaled_mean_standard_deviation( const Vector<T> &mean, const Vector<T> &standard_deviation) const { const size_t this_size = this->size(); #ifdef __OPENNN_DEBUG__ const size_t mean_size = mean.size(); if(mean_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector template." 
<< "Vector<T> calculate_unscaled_mean_standard_deviation(const " "Vector<T>&, const Vector<T>&) const method.\n" << "Size of mean vector must be equal to size.\n"; throw logic_error(buffer.str()); } const size_t standard_deviation_size = standard_deviation.size(); if(standard_deviation_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector template.\n" << "Vector<T> calculate_unscaled_mean_standard_deviation(const " "Vector<T>&, const Vector<T>&) const method.\n" << "Size of standard deviation vector must be equal to size.\n"; throw logic_error(buffer.str()); } #endif Vector<T> unscaled_mean_standard_deviation(this_size); for(size_t i = 0; i < this_size; i++) { if(standard_deviation[i] < numeric_limits<double>::min()) { cout << "OpenNN Warning: Vector template.\n" << "Vector<T> calculate_unscaled_mean_standard_deviation(const " "Vector<T>&, const Vector<T>&) const method.\n" << "Standard deviation of variable " << i << " is zero.\n" << "Those elements won't be scaled.\n"; unscaled_mean_standard_deviation[i] = (*this)[i]; } else { unscaled_mean_standard_deviation[i] = (*this)[i] * standard_deviation[i] + mean[i]; } } return(unscaled_mean_standard_deviation); } /// Unscales the vector elements with given minimum and maximum values. /// It updates the vector elements. /// The size of the minimum and maximum vectors must be equal to the size of the /// vector. /// @param minimum Minimum values. /// @param maximum Maximum deviation values. template <class T> void Vector<T>::unscale_minimum_maximum(const Vector<T> &minimum, const Vector<T> &maximum) { const size_t this_size = this->size(); #ifdef __OPENNN_DEBUG__ const size_t minimum_size = minimum.size(); if(minimum_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector template." 
<< "void unscale_minimum_maximum(const Vector<T>&, const " "Vector<T>&) method.\n" << "Size of minimum vector must be equal to size.\n"; throw logic_error(buffer.str()); } const size_t maximum_size = maximum.size(); if(maximum_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector template." << "void unscale_minimum_maximum(const Vector<T>&, const " "Vector<T>&) method.\n" << "Size of maximum vector must be equal to size.\n"; throw logic_error(buffer.str()); } #endif for(size_t i = 0; i < this_size; i++) { if(maximum[i] - minimum[i] < numeric_limits<double>::min()) { cout << "OpenNN Warning: Vector template.\n" << "void unscale_minimum_maximum(const Vector<T>&, const " "Vector<T>&) method.\n" << "Minimum and maximum values of variable " << i << " are equal.\n" << "Those elements won't be unscaled.\n"; // Do nothing } else { (*this)[i] = 0.5 *((*this)[i] + 1.0) *(maximum[i] - minimum[i]) + minimum[i]; } } } /// Unscales the vector elements with given mean and standard deviation values. /// It updates the vector elements. /// The size of the mean and standard deviation vectors must be equal to the /// size of the vector. /// @param mean Mean values. /// @param standard_deviation Standard deviation values. template <class T> void Vector<T>::unscale_mean_standard_deviation( const Vector<T> &mean, const Vector<T> &standard_deviation) { const size_t this_size = this->size(); #ifdef __OPENNN_DEBUG__ const size_t mean_size = mean.size(); if(mean_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector template." 
<< "void unscale_mean_standard_deviation(const Vector<T>&, const " "Vector<T>&) method.\n" << "Size of mean vector must be equal to size.\n"; throw logic_error(buffer.str()); } const size_t standard_deviation_size = standard_deviation.size(); if(standard_deviation_size != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector template.\n" << "void unscale_mean_standard_deviation(const Vector<T>&, const " "Vector<T>&) method.\n" << "Size of standard deviation vector must be equal to size.\n"; throw logic_error(buffer.str()); } #endif for(size_t i = 0; i < this_size; i++) { if(standard_deviation[i] < numeric_limits<double>::min()) { cout << "OpenNN Warning: Vector template.\n" << "void unscale_mean_standard_deviation(const Vector<T>&, " "const Vector<T>&) method.\n" << "Standard deviation of variable " << i << " is zero.\n" << "Those elements won't be scaled.\n"; // Do nothing } else { (*this)[i] = (*this)[i] * standard_deviation[i] + mean[i]; } } } template <class T> Vector<T> Vector<T>::calculate_reverse_scaling(void) const { const size_t this_size = this->size(); Vector<T> reverse_scaling_vector(this_size); reverse_scaling_vector[0] = 1; for(size_t i = 1; i < this_size; i++) { reverse_scaling_vector[i] = ((*this)[this_size-1]-(*this)[i])/((*this)[this_size-1]-(*this)[0]); } return reverse_scaling_vector; } template <class T> Vector<T> Vector<T>::calculate_scaling_between(const T& min_old, const T& max_old, const T& min_new, const T& max_new) const { const size_t this_size = this->size(); Vector<T> scaled_vector(this_size); for(size_t i = 0; i < this_size; i++) { scaled_vector[i] = min_new + (max_new-min_new) * ((*this)[i]-min_old) / (max_old-min_old); } return scaled_vector; } /// Returns a squared matrix in which the entries outside the main diagonal are /// all zero. /// The elements in the diagonal are the elements in this vector. 
template <class T>
Matrix<T> Vector<T>::to_diagonal_matrix() const {
  const size_t this_size = this->size();

  // Square matrix, zero everywhere except the diagonal.
  Matrix<T> matrix(this_size, this_size, 0.0);

  matrix.set_diagonal(*this);

  return(matrix);
}

/// Returns the elements in positions [first_index, last_index], both inclusive.
/// @param first_index Index of the first element to copy.
/// @param last_index Index of the last element to copy.

template <class T>
Vector<T> Vector<T>::get_subvector(const size_t& first_index, const size_t& last_index) const {
#ifdef __OPENNN_DEBUG__

  const size_t this_size = this->size();

  if(last_index >= this_size || first_index >= this_size) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "Vector<T> get_subvector(const size_t&, const size_t&) const method.\n"
           << "Index is equal or greater than this size.\n";

    throw logic_error(buffer.str());
  }

#endif

  Vector<T> subvector(last_index-first_index + 1);

  for(size_t i = first_index; i < last_index+1; i++) {
    subvector[i-first_index] = (*this)[i];
  }

  return(subvector);
}

/// Returns another vector whose elements are given by some elements of this
/// vector.
/// @param indices Indices of this vector whose elements are required.

template <class T>
Vector<T> Vector<T>::get_subvector(const Vector<size_t> &indices) const {
  const size_t new_size = indices.size();

  if(new_size == 0) return Vector<T>();

// Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  const size_t this_size = this->size();

  for(size_t i = 0; i < new_size; i++) {
    // Fixed: the check used '>', which let an index equal to the size slip
    // through and read one element past the end below.
    if(indices[i] >= this_size) {
      ostringstream buffer;

      buffer << "OpenNN Exception: Vector Template.\n"
             << "Vector<T> get_subvector(const Vector<T>&) const method.\n"
             << "Index is equal or greater than this size.\n";

      throw logic_error(buffer.str());
    }
  }

#endif

  Vector<T> subvector(new_size);

  for(size_t i = 0; i < new_size; i++) {
    subvector[i] = (*this)[indices[i]];
  }

  return(subvector);
}

/// Returns the elements of this vector selected by a boolean mask.
/// @param selection Mask vector; true marks an element to keep.

template <class T>
Vector<T> Vector<T>::get_subvector(const Vector<bool>& selection) const {
  const Vector<size_t> indices = selection.calculate_equal_to_indices(true);

  return(get_subvector(indices));
}

/// Returns new_size elements of this vector chosen at random(without
/// replacement).  If new_size equals the vector size, a plain copy is
/// returned.
/// @param new_size Number of elements to pick.

template <class T>
Vector<T> Vector<T>::get_subvector_random(const size_t& new_size) const {
  if(new_size == this->size()) return Vector<T>(*this);

  Vector<T> new_vector(*this);

  random_shuffle(new_vector.begin(), new_vector.end());

  return new_vector.get_first(new_size);
}

/// Returns a vector with the first n elements of this vector.
/// @param elements_number Size of the new vector.

template <class T>
Vector<T> Vector<T>::get_first(const size_t &elements_number) const {
// Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  const size_t this_size = this->size();

  if(elements_number > this_size) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "Vector<T> get_first(const size_t&) const method.\n"
           << "Number of elements must be equal or greater than this size.\n";

    throw logic_error(buffer.str());
  }

#endif

  Vector<T> subvector(elements_number);

  for(size_t i = 0; i < elements_number; i++) {
    subvector[i] = (*this)[i];
  }

  return(subvector);
}

/// Returns a vector with the last n elements of this vector.
/// @param elements_number Size of the new vector.

template <class T>
Vector<T> Vector<T>::get_last(const size_t &elements_number) const {
  const size_t this_size = this->size();

// Control sentence(if debug)

#ifdef __OPENNN_DEBUG__

  if(elements_number > this_size) {
    ostringstream buffer;

    buffer << "OpenNN Exception: Vector Template.\n"
           << "Vector<T> get_last(const size_t&) const method.\n"
           << "Number of elements must be equal or greater than this size.\n";

    throw logic_error(buffer.str());
  }

#endif

  Vector<T> subvector(elements_number);

  for(size_t i = 0; i < elements_number; i++) {
    subvector[i] = (*this)[i + this_size - elements_number];
  }

  return(subvector);
}

/// Returns a vector with the integers of the vector.
/// @param maximum_integers Maximum number of integers to get.
template <class T> Vector<T> Vector<T>::get_integer_elements(const size_t& maximum_integers) const { const size_t this_size = this->size(); const size_t integers_number = this->count_integers(maximum_integers); Vector<T> integers(integers_number); size_t index = 0; for(size_t i = 0; i < this_size; i++) { if(!integers.contains((*this)[i])) { integers[index] = (*this)[i]; index++; if(index > integers_number) { break; } } } return integers; } /// Returns a vector with the integers of the vector. /// @param missing_indices Indices of the instances with missing values. /// @param maximum_integers Maximum number of integers to get. template <class T> Vector<T> Vector<T>::get_integer_elements_missing_values(const Vector<size_t>& missing_indices, const size_t& maximum_integers) const { const size_t this_size = this->size(); const size_t integers_number = this->count_integers_missing_values(missing_indices, maximum_integers); Vector<T> integers(integers_number); size_t index = 0; for(size_t i = 0; i < this_size; i++) { if(!missing_indices.contains(i)) { if(!integers.contains((*this)[i])) { integers[index] = (*this)[i]; index++; if(index > integers_number) { break; } } } } return integers; } /// Returns a Matrix in which column i is(this)^i /// @param order Maximum order. template <class T> Matrix<T> Vector<T>::get_power_matrix(const size_t& order) const { Matrix<T> power_matrix(this->size(),order); for(size_t i=1; i <=order; i++) { for(size_t j=0; j<this->size(); j++) { power_matrix(j,i-1) = pow((*this)[j],i); } } return power_matrix; } /// Loads the members of a vector from an data file. /// Please be careful with the file format, which is specified in the OpenNN /// manual. /// @param file_name Name of vector file. 
template <class T> void Vector<T>::load(const string &file_name) { ifstream file(file_name.c_str()); stringstream buffer; string line; while(file.good()) { getline(file, line); buffer << line; } istream_iterator<string> it(buffer); istream_iterator<string> end; const vector<string> results(it, end); const size_t new_size = static_cast<size_t>(results.size()); this->resize(new_size); file.clear(); file.seekg(0, ios::beg); // Read data for(size_t i = 0; i < new_size; i++) { file >>(*this)[i]; } file.close(); } /// Saves to a data file the elements of the vector. /// The file format is as follows: /// element_0 element_1 ... element_N-1 /// @param file_name Name of vector data file. template <class T> void Vector<T>::save(const string &file_name, const char& separator) const { ofstream file(file_name.c_str()); if(!file.is_open()) { ostringstream buffer; buffer << "OpenNN Exception: Vector template.\n" << "void save(const string&) const method.\n" << "Cannot open vector data file.\n"; throw logic_error(buffer.str()); } // Write file const size_t this_size = this->size(); if(this_size > 0) { file <<(*this)[0]; for(size_t i = 1; i < this_size; i++) { file << separator <<(*this)[i]; } file << endl; } // Close file file.close(); } /// Insert another vector starting from a given position. /// @param position Insertion position. /// @param other_vector Vector to be inserted. 
template <class T> void Vector<T>::tuck_in(const size_t &position, const Vector<T> &other_vector) { const size_t other_size = other_vector.size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ const size_t this_size = this->size(); if(position + other_size > this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "void insert(const size_t&, const Vector<T>&) const method.\n" << "Cannot tuck in vector.\n"; throw logic_error(buffer.str()); } #endif for(size_t i = 0; i < other_size; i++) { (*this)[position + i] = other_vector[i]; } } /// Returns a new vector with a new element inserted. /// @param index Position of the new element. /// @param value Value of the new element. template <class T> Vector<T> Vector<T>::insert_element(const size_t &index, const T &value) const { // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ const size_t this_size = this->size(); if(index > this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "void insert_element(const size_t& index, const T& value) method.\n" << "Index is greater than vector size.\n"; throw logic_error(buffer.str()); } #endif Vector<T> other_vector(*this); const auto it = other_vector.begin(); other_vector.insert(it+index, value); return(other_vector); } /// Returns a new vector where a given element of this vector is replaced by another vector. /// @param index Index of the element to be replaced. /// @param other_vector Replacement vector. 
template <class T> Vector<T> Vector<T>::replace_element(const size_t &index, const Vector<T> &other_vector) const { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ if(index > this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "void insert_element(const size_t& index, const T& value) method.\n" << "Index is greater than vector size.\n"; throw logic_error(buffer.str()); } #endif const size_t other_size = other_vector.size(); const size_t new_size = this_size - 1 + other_size; Vector<T> new_vector(new_size); for(size_t i = 0; i < index; i++) { new_vector[i] = (*this)[i]; } for(size_t i = index; i < index+other_size; i++) { new_vector[i] = other_vector[i-index]; } for(size_t i = index+other_size; i < new_size; i++) { new_vector[i] = (*this)[i+1-other_size]; } return(new_vector); } /// Returns a new vector where a given value has been replaced by another one. /// @param find Value to be replaced. /// @param replace Replacement value. template <class T> Vector<T> Vector<T>::replace_value(const T& find_value, const T& replace_value) const { Vector<T> new_vector(*this); replace(new_vector.begin(), new_vector.end(), find_value, replace_value); return(new_vector); } /// Returns a new vector where a given value has been replaced by another one. /// @param find Value to be found. /// @param replace Replacement value. template <class T> Vector<T> Vector<T>::replace_value_if_contains(const T& find, const T& replace) const { Vector<T> new_vector(*this); for(size_t i = 0; i < new_vector.size(); i++) { if((*this)[i].find(find) != string::npos) { new_vector[i] = replace; } } return(new_vector); } /// Splits a string slement into substrings wherever delimiter occurs, and returns the vector of those strings. /// If sep does not match anywhere in the string, this method returns a single-element vector containing this string. /// @param index Index of elements. /// @param delimiter Separator char. 
template <class T> Vector<string> Vector<T>::split_element(const size_t &index, const char &delimiter) const { const Vector<string> split(split_string((*this)[index], delimiter)); return(split); } /// Returns a new vector which is a copy of this vector but with a given element /// removed. /// Therefore, the size of the new vector is the size of this vector minus one. /// @param index Index of element to be removed. template <class T> Vector<T> Vector<T>::delete_index(const size_t &index) const { const size_t this_size = this->size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ if(index >= this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "Vector<T> remove_element(const size_t&) const method.\n" << "Index is equal or greater than vector size.\n"; throw logic_error(buffer.str()); } #endif Vector<T> other_vector(this_size - 1); for(size_t i = 0; i < this_size; i++) { if(i < index) { other_vector[i] = (*this)[i]; } else if(i > index) { other_vector[i - 1] = (*this)[i]; } } return(other_vector); } /// Returns a new vector which is a copy of this vector but with a given elements /// removed. /// Therefore, the size of the new vector is the size of this vector minus the indices vector size. /// @param indices Vector with the indices of the elements to be removed. 
template <class T> Vector<T> Vector<T>::delete_indices(const Vector<size_t> &indices) const { const size_t this_size = this->size(); const size_t indices_size = indices.size(); // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ if(indices.calculate_maximum() >= this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "Vector<T> remove_elements(const Vector<size_t>&) const method.\n" << "Maximum index is equal or greater than vector size.\n"; throw logic_error(buffer.str()); } if(indices_size >= this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "Vector<T> remove_elements(const Vector<size_t>&) const method.\n" << "Number of indices to remove is equal or greater than vector size.\n"; throw logic_error(buffer.str()); } #endif Vector<T> other_vector(this_size - indices_size); size_t index = 0; for(size_t i = 0; i < this_size; i++) { if(!indices.contains(i)) { other_vector[index] = (*this)[i]; index++; } } return(other_vector); } /// Construct a copy of this vector but without a certain value. /// Note that the new vector might have a different size than this vector. /// @param value Value of elements to be removed. template <class T> Vector<T> Vector<T>::delete_value(const T &value) const { const size_t this_size = this->size(); const size_t value_count = count_equal_to(value); if(value_count == 0) return Vector<T>(*this); const size_t other_size = this_size - value_count; Vector<T> other_vector(other_size); size_t other_index = 0; for(size_t i = 0; i < this_size; i++) { if((*this)[i] != value) { other_vector[other_index] = (*this)[i]; other_index++; } } return other_vector; } template <class T> Vector<T> Vector<T>::delete_values(const Vector<T> &values) const { Vector<T> new_vector(*this); for(size_t i = 0; i < values.size(); i++) { new_vector = new_vector.delete_value(values[i]); } return(new_vector); } /// Assemble two vectors. /// @param other_vector Vector to be get_assemblyd to this vector. 
template <class T> Vector<T> Vector<T>::assemble(const Vector<T> &other_vector) const { const size_t this_size = this->size(); const size_t other_size = other_vector.size(); if(this_size == 0 && other_size == 0) { Vector<T> assembly; return(assembly); } else if(this_size == 0) { return(other_vector); } else if(other_size == 0) { return(*this); } else { Vector<T> assembly(this_size + other_size); for(size_t i = 0; i < this_size; i++) { assembly[i] = (*this)[i]; } for(size_t i = 0; i < other_size; i++) { assembly[this_size + i] = other_vector[i]; } return(assembly); } } /// Assemble a vector of vectors to this vector. /// @param vectors Vector of vectors to be get_assembled to this vector. template <class T> Vector<T> Vector<T>::assemble(const Vector< Vector<T> >& vectors) { const size_t vectors_size = vectors.size(); size_t new_size = 0; for(size_t i = 0; i < vectors_size; i++) { new_size += vectors[i].size(); } Vector<T> new_vector(new_size); size_t index = 0; for(size_t i = 0; i < vectors_size; i++) { for(size_t j = 0; j < vectors[i].size(); j++) { new_vector[index] = vectors[i][j]; index++; } } return(new_vector); } /// Returns a vector which is the difference of this vector and another vector. /// For instance, if this vector is(1,2,3) and the other vector is(1,4,3,3), /// the difference is(2), the element in the first vector which is not present in the second. /// @param other_vector Other vector. 
template <class T> Vector<T> Vector<T>::get_difference(const Vector<T> &other_vector) const { if(this->empty()) { return other_vector; } if(other_vector.empty()) { Vector<T> copy(*this); return copy; } const size_t this_size = this->size(); Vector<T> difference(this_size); typename vector<T>::iterator iterator; Vector<T> copy_this(*this); Vector<T> copy_other_vector(other_vector); sort(copy_this.begin(), copy_this.end()); sort(copy_other_vector.begin(), copy_other_vector.end()); iterator = set_difference(copy_this.begin(),copy_this.end(), copy_other_vector.begin(), copy_other_vector.end(), difference.begin()); difference.resize(iterator - difference.begin()); return(difference); } /// Returns a vector which is the union of this vector and another vector. /// For instance, if this vector is(1,2,3) and the other vector is(2,3,4), /// the union is(2,3), the elements that are present in the two vectors. /// @param other_vector Other vector. template <class T> Vector<T> Vector<T>::get_union(const Vector<T>& other_vector) const { Vector<T> this_copy(*this); sort(this_copy.begin(), this_copy.end()); Vector<T> other_copy(other_vector); sort(other_copy.begin(), other_copy.end()); Vector<T> union_vector; set_union(this_copy.begin(), this_copy.end(), other_copy.begin(), other_copy.end(), back_inserter(union_vector)); return union_vector; } /// Returns a vector with the intersection of this vector and another vector. /// For instance, if this vector is(a, b, c) and the other vector is(b, c, d, e), /// the new vector will be(b, c). /// @param other_vector Other vector. 
template <class T> Vector<T> Vector<T>::get_intersection(const Vector<T>& other_vector) const { Vector<T> this_copy(*this); sort(this_copy.begin(), this_copy.end()); Vector<T> other_copy(other_vector); sort(other_copy.begin(), other_copy.end()); Vector<T> intersection; set_intersection(this_copy.begin(), this_copy.end(), other_copy.begin(), other_copy.end(), back_inserter(intersection)); return intersection; } /// Returns a vector with the unique items of the vector. /// For instance, if the vector is("a,b", "b,c", "c,d"), the new vector will be(a, b, c, d). template <class T> Vector<T> Vector<T>::get_unique_items(const char& separator) const { const Vector<T> unique_mixes = get_unique_elements(); Vector< Vector<T> > items(unique_mixes.size()); Vector<T> unique_items; for(int i = 0; i < unique_mixes.size(); i++) { items[i] = unique_mixes.split_element(i, separator); unique_items = unique_items.assemble(items[i]).get_unique_elements(); } return unique_items; } /// Returns a vector with the unique values of the vector. /// For instance, if the vector is(a, b, a), the new vector will be(a, b). template <class T> Vector<T> Vector<T>::get_unique_elements() const { Vector<T> copy_vector(*this); sort(copy_vector.begin(), copy_vector.end()); const auto last = unique(copy_vector.begin(), copy_vector.end()); copy_vector.erase(last, copy_vector.end()); return(copy_vector); } template <class T> Vector<T> Vector<T>::get_unique_elements_unsorted() const { Vector<T> copy_vector(*this); const auto last = unique(copy_vector.begin(), copy_vector.end()); copy_vector.erase(last, copy_vector.end()); return(copy_vector); } /// Returns a vector with the indices of the unique elements, in the order given by the get_unique_elements method. /// For instance, if the input vector is(a, b, a), the output vector is(0, 1). 
template <class T> Vector<size_t> Vector<T>::get_unique_elements_first_indices() const { const Vector<T> unique_values = get_unique_elements(); const size_t unique_size = unique_values.size(); Vector<size_t> unique_indices(unique_size); for(size_t i = 0; i < unique_size; i++) { unique_indices[i] = get_first_index(unique_values[i]); } return(unique_indices); } template <class T> Vector< Vector<size_t> > Vector<T>::get_unique_elements_indices() const { const Vector<T> unique_elements = this->get_unique_elements(); const int unique_elements_size = static_cast<int>(unique_elements.size()); Vector< Vector<size_t> > unique_leads_indices(static_cast<size_t>(unique_elements_size)); #pragma omp parallel for for (int i = 0; i < unique_elements_size; i++) { unique_leads_indices[static_cast<size_t>(i)] = this->calculate_equal_to_indices(unique_elements[static_cast<size_t>(i)]); } return unique_leads_indices; } /// Returns a vector with the numbers of the unique elements, in the order given by the get_unique_elements method. /// For instance, if the input vector is(a, b, a), the output vector is(2, 1). template <class T> Vector<size_t> Vector<T>::count_unique() const { const Vector<T> unique = get_unique_elements(); const size_t unique_size = unique.size(); Vector<size_t> unique_count(unique_size); #pragma omp parallel for for(int i = 0; i < static_cast<int>(unique_size); i++) { unique_count[i] = count_equal_to(unique[i]); } return(unique_count); } /// Prints to the screen the unique elements of the vector, the number of that elements and the corresponding percentage. /// It sorts the elements from greater to smaller. /// For instance, for the vector(a, b, a), it will print /// a: 2(66.6%), b: 1(33.3%). 
template <class T>
void Vector<T>::print_unique() const {
  const size_t this_size = this->size();

  const Vector<T> unique = get_unique_elements();

  const Vector<size_t> count = count_unique();

  // Fixed: reuse the counts computed above instead of calling count_unique()
  // a second time.
  const Vector<double> percentage =
      count.to_double_vector()*(100.0/static_cast<double>(this_size));

  const size_t unique_size = unique.size();

  Matrix<T> unique_matrix(unique_size, 3);
  unique_matrix.set_column(0, unique.to_string_vector());
  unique_matrix.set_column(1, count.to_string_vector());
  unique_matrix.set_column(2, percentage.to_string_vector());

  // Sort rows by occurrence count, descending.
  unique_matrix = unique_matrix.sort_descending_strings(1);

  cout << "Total: " << this_size << endl;

  for(size_t i = 0; i < unique_size; i++) {
    cout << unique_matrix(i,0) << ": " << unique_matrix(i,1)
         << "(" << unique_matrix(i,2) << "%)" << endl;
  }
}

/// Returns the(at most) rank most frequent distinct elements of this vector,
/// ordered from most to least frequent.
/// @param rank Maximum number of elements to return.

template <class T>
Vector<T> Vector<T>::calculate_top(const size_t& rank) const {
  const Vector<T> unique = get_unique_elements();

  const Vector<size_t> count = count_unique();

  const size_t unique_size = unique.size();

  Matrix<T> unique_matrix(unique_size, 2);
  unique_matrix.set_column(0, unique.to_string_vector());
  unique_matrix.set_column(1, count.to_string_vector());

  unique_matrix = unique_matrix.sort_descending_strings(1);

  // Never ask for more rows than exist.
  const size_t end = unique_size < rank ? unique_size : rank;

  const Vector<T> top = unique_matrix.get_column(0).get_first(end);

  return(top);
}

/// Returns a matrix whose rows are the(at most) rank most frequent distinct
/// elements with their counts and percentages, sorted by count descending.
/// @param rank Maximum number of rows to return.

template <class T>
Matrix<T> Vector<T>::calculate_top_matrix(const size_t& rank) const {
  const Vector<T> unique = get_unique_elements();

  const Vector<size_t> count = count_unique();

  const size_t this_size = this->size();

  // Fixed: reuse 'count' instead of recomputing count_unique().
  const Vector<double> percentage =
      count.to_double_vector()*(100.0/static_cast<double>(this_size));

  const size_t unique_size = unique.size();

  Matrix<T> unique_matrix(unique_size, 3);
  unique_matrix.set_column(0, unique.to_string_vector());
  unique_matrix.set_column(1, count.to_string_vector());
  unique_matrix.set_column(2, percentage.to_string_vector());

  unique_matrix = unique_matrix.sort_descending_strings(1);

  if(rank < unique_size) {
    // Fixed: build the index range only when it is actually used.
    // NOTE(review): rank == 0 makes rank-1 wrap around(size_t) — confirm
    // callers never pass 0.
    const Vector<size_t> indices(0,1,rank-1);

    unique_matrix = unique_matrix.get_submatrix_rows(indices);

    return(unique_matrix);
  } else {
    return(unique_matrix);
  }
}

/// Same as calculate_top_matrix, but the percentages are computed against an
/// externally supplied total instead of this vector's size.
/// @param rank Maximum number of rows to return.
/// @param new_total Denominator used for the percentage column.

template <class T>
Matrix<T> Vector<T>::calculate_top_matrix_over(const size_t& rank,
                                               const size_t& new_total) const {
  const Vector<T> unique = get_unique_elements();

  const Vector<size_t> count = count_unique();

  // Fixed: reuse 'count' instead of recomputing count_unique().
  const Vector<double> percentage =
      count.to_double_vector()*(100.0/static_cast<double>(new_total));

  const size_t unique_size = unique.size();

  Matrix<T> unique_matrix(unique_size, 3);
  unique_matrix.set_column(0, unique.to_string_vector());
  unique_matrix.set_column(1, count.to_string_vector());
  unique_matrix.set_column(2, percentage.to_string_vector());

  unique_matrix = unique_matrix.sort_descending_strings(1);

  if(rank < unique_size) {
    // Fixed: build the index range only when it is actually used.
    const Vector<size_t> indices(0,1,rank-1);

    unique_matrix = unique_matrix.get_submatrix_rows(indices);

    return(unique_matrix);
  } else {
    return(unique_matrix);
  }
}

/// Prints to the screen the unique elements of the vector, the number of that elements and the corresponding percentage.
/// It sorts the elements from greater to smaller and only prints the top ones.
/// @param rank Number of top elements that are printed. template <class T> void Vector<T>::print_top(const size_t& rank) const { const Vector<T> unique = get_unique_elements(); const Vector<size_t> count = count_unique(); const size_t this_size = this->size(); const Vector<double> percentage = count_unique().to_double_vector()*(100.0/static_cast<double>(this_size)); const size_t unique_size = unique.size(); if(unique_size == 0) return; Matrix<T> unique_matrix(unique_size, 3); unique_matrix.set_column(0, unique.to_string_vector()); unique_matrix.set_column(1, count.to_string_vector()); unique_matrix.set_column(2, percentage.to_string_vector()); unique_matrix = unique_matrix.sort_descending_strings(1); const size_t end = unique_size < rank ? unique_size : rank; for(size_t i = 0; i < end; i++) { cout << i+1 << ". " << unique_matrix(i,0) << ": " << unique_matrix(i,1) << "(" << unique_matrix(i,2) << "%)" << endl; } } /// Returns a std vector with the size and elements of this OpenNN vector. template <class T> vector<T> Vector<T>::to_std_vector() const { const size_t this_size = this->size(); vector<T> std_vector(this_size); for(size_t i = 0; i < this_size; i++) { std_vector[i] = (*this)[i]; } return(std_vector); } /// Returns a new vector with the elements of this vector casted to double. template <class T> Vector<double> Vector<T>::to_double_vector() const { const size_t this_size = this->size(); Vector<double> double_vector(this_size); for(size_t i = 0; i < this_size; i++) { double_vector[i] = static_cast<double>((*this)[i]); } return(double_vector); } /// Returns a new vector with the elements of this vector casted to int. template <class T> Vector<int> Vector<T>::to_int_vector() const { const size_t this_size = this->size(); Vector<int> int_vector(this_size); for(size_t i = 0; i < this_size; i++) { int_vector[i] = static_cast<int>((*this)[i]); } return(int_vector); } /// Returns a new vector with the elements of this vector casted to size_t. 
template <class T> Vector<size_t> Vector<T>::to_size_t_vector() const { const size_t this_size = this->size(); Vector<size_t> size_t_vector(this_size); for(size_t i = 0; i < this_size; i++) { size_t_vector[i] = static_cast<size_t>((*this)[i]); } return(size_t_vector); } /// Returns a new vector with the elements of this vector casted to time_t. template <class T> Vector<time_t> Vector<T>::to_time_t_vector() const { const size_t this_size = this->size(); Vector<time_t> size_t_vector(this_size); for(size_t i = 0; i < this_size; i++) { size_t_vector[i] = static_cast<time_t>((*this)[i]); } return(size_t_vector); } template <class T> Vector<bool> Vector<T>::to_bool_vector() const { const Vector<T> unique = get_unique_elements(); if(unique.size() != 2) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "Vector<bool> to_bool_vector() const.\n" << "Number of unique items(" << get_unique_elements().size() << ") must be 2.\n"; throw logic_error(buffer.str()); } const size_t this_size = this->size(); Vector<bool> new_vector(this_size); for(size_t i = 0; i < this_size; i++) { if((*this)[i] == unique[0]) { new_vector[i] = true; } else { new_vector[i] = false; } } return(new_vector); } /// Returns a new vector with the elements of this vector converted to string. template <class T> Vector<string> Vector<T>::to_string_vector() const { const size_t this_size = this->size(); Vector<string> string_vector(this_size); ostringstream buffer; for(size_t i = 0; i < this_size; i++) { buffer.str(""); buffer <<(*this)[i]; string_vector[i] = buffer.str(); } return(string_vector); } /// Returns a new vector with the elements of this vector casted to double. 
template <class T> Vector<double> Vector<T>::string_to_double(const double& exception_value) const { const size_t this_size = this->size(); Vector<double> double_vector(this_size); for(size_t i = 0; i < this_size; i++) { try { double_vector[i] = stod((*this)[i]); } catch(const logic_error&) { double_vector[i] = exception_value; } } return(double_vector); } /// Returns a new vector with the elements of this string vector converted to double. template <class T> Vector<int> Vector<T>::string_to_int(const int& exception_value) const { const size_t this_size = this->size(); Vector<int> int_vector(this_size); for(size_t i = 0; i < this_size; i++) { try { int_vector[i] = stoi((*this)[i]); } catch(const logic_error&) { int_vector[i] = exception_value; } } return(int_vector); } /// Returns a new vector with the elements of this string vector converted to size_t. template <class T> Vector<size_t> Vector<T>::string_to_size_t(const size_t& exception_value) const { const size_t this_size = this->size(); Vector<size_t> size_t_vector(this_size); for(size_t i = 0; i < this_size; i++) { try { size_t_vector[i] = static_cast<size_t>(stoi((*this)[i])); } catch(const logic_error&) { size_t_vector[i] = exception_value; } } return(size_t_vector); } /// Returns a new vector with the elements of this string vector converted to time_t. template <class T> Vector<time_t> Vector<T>::string_to_time_t(const time_t& exception_value) const { const size_t this_size = this->size(); Vector<time_t> time_vector(this_size); for(size_t i = 0; i < this_size; i++) { try { time_vector[i] = static_cast<time_t>(stoi((*this)[i])); } catch(const logic_error&) { time_vector[i] = exception_value; } } return(time_vector); } /// Takes a string vector representing a date in the format Mon Jan 30 2017 12:52:24 and returns a timestamp vector. 
template <class T>
Vector<time_t> Vector<T>::www_mmm_ddd_yyyy_hh_mm_ss_to_time() const
{
    const size_t this_size = this->size();

    Vector<time_t> time_vector(this_size);

    // Example element: Mon Jan 30 2017 12:52:24

    static const char* month_names[12] =
    {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};

    for(size_t i = 0; i < this_size; i++)
    {
        const vector<string> date_elements = split_string((*this)[i], ' ');

        // Month

        int month = 0;

        for(int j = 0; j < 12; j++)
        {
            if(date_elements[1] == month_names[j])
            {
                month = j + 1;
                break;
            }
        }

        if(month == 0)
        {
            // The original code printed an uninitialized int here and then fed
            // garbage to mktime (undefined behavior); fail loudly instead.
            ostringstream buffer;

            buffer << "OpenNN Exception: Vector Template.\n"
                   << "Vector<time_t> www_mmm_ddd_yyyy_hh_mm_ss_to_time() const method.\n"
                   << "Unknown month: " << date_elements[1] << endl;

            throw logic_error(buffer.str());
        }

        // Month day

        const int month_day = stoi(date_elements[2]);

        // Year

        const int year = stoi(date_elements[3]);

        // Time

        const vector<string> time_elements = split_string(date_elements[4], ':');

        const int hours = stoi(time_elements[0]);
        const int minutes = stoi(time_elements[1]);
        const int seconds = stoi(time_elements[2]);

        // Zero-initialize: mktime reads every field, and the original left
        // tm_isdst (among others) uninitialized, which is undefined behavior.
        struct tm timeinfo = {};
        timeinfo.tm_year = year - 1900;
        timeinfo.tm_mon = month - 1;
        timeinfo.tm_mday = month_day;
        timeinfo.tm_hour = hours;
        timeinfo.tm_min = minutes;
        timeinfo.tm_sec = seconds;

        time_vector[i] = mktime(&timeinfo);
    }

    return(time_vector);
}


/// Takes a string vector representing dates as year and month and returns a timestamp vector.
/// @param delimiter Char between year and month(/, -, etc).

template <class T>
Vector<time_t> Vector<T>::yyyy_mm_to_time(const char& delimiter) const
{
    const size_t this_size = this->size();

    Vector<time_t> time(this_size);

    vector<string> date_elements;

    int mm;
    int yyyy;

    for(size_t i = 0; i < this_size; i++)
    {
        date_elements = split_string((*this)[i], delimiter);

        if(date_elements.size() == 0)
        {
            time[i] = 0;
        }
        else if(date_elements.size() != 2)
        {
            ostringstream buffer;

            // Message fixed: it used to name mm_yyyy_to_time(), a different method.
            buffer << "OpenNN Exception: Vector Template.\n"
                   << "Vector<time_t> yyyy_mm_to_time(const char&) const method.\n"
                   << "Element " << i << " has a wrong format: \"" <<(*this)[i] << "\"" << endl;

            throw logic_error(buffer.str());
        }
        else
        {
            // Month

            mm = stoi(date_elements[1]);

            // Year

            yyyy = stoi(date_elements[0]);

            // Zero-initialize so mktime does not read indeterminate fields.
            struct tm time_info = {};
            time_info.tm_year = yyyy - 1900;
            time_info.tm_mon = mm - 1;
            time_info.tm_mday = 1;

            // One day is added to the timestamp, as in the original code.
            // NOTE(review): presumably a time-zone guard — confirm it is still wanted.
            time[i] = mktime(&time_info) + 3600*24;

            if(time[i] == static_cast<time_t>(-1))
            {
                ostringstream buffer;

                buffer << "OpenNN Exception: Vector Template.\n"
                       << "Vector<time_t> yyyy_mm_to_time(const char&) const method.\n"
                       << "Element " << i << " can not be converted to time_t: \"" <<(*this)[i] << "\"" << endl;

                throw logic_error(buffer.str());
            }
        }
    }

    return(time);
}


/// Takes a string vector representing a date with day, month and year and returns a timestamp vector.
/// For instance, 21/12/2017 or 21-12-2017 to 1513814400.
/// @param delimiter Char between year, month and day(/, -, etc).

template <class T>
Vector<time_t> Vector<T>::dd_mm_yyyy_to_time(const char& delimiter) const
{
    const size_t this_size = this->size();

    Vector<time_t> time(this_size);

    vector<string> date_elements;

    int dd;
    int mm;
    int yyyy;

    for(size_t i = 0; i < this_size; i++)
    {
        date_elements = split_string((*this)[i], delimiter);

        if(date_elements.size() == 0)
        {
            time[i] = 0;
        }
        else if(date_elements.size() != 3)
        {
            ostringstream buffer;

            buffer << "OpenNN Exception: Vector Template.\n"
                   << "Vector<time_t> dd_mm_yyyy_to_time() const method.\n"
                   << "Element " << i << " has a wrong format: \"" <<(*this)[i] << "\"" << endl;

            throw logic_error(buffer.str());
        }
        else
        {
            // Month day

            dd = stoi(date_elements[0]);

            // Month

            mm = stoi(date_elements[1]);

            // Year

            yyyy = stoi(date_elements[2]);

            // Zero-initialize so mktime does not read indeterminate fields.
            struct tm time_info = {};
            time_info.tm_year = yyyy - 1900;
            time_info.tm_mon = mm - 1;
            time_info.tm_mday = dd;

            // One day is added to the timestamp, as in the original code.
            time[i] = mktime(&time_info) + 3600*24;

            if(time[i] == static_cast<time_t>(-1))
            {
                ostringstream buffer;

                buffer << "OpenNN Exception: Vector Template.\n"
                       << "Vector<time_t> dd_mm_yyyy_to_time() const method.\n"
                       << "Element " << i << " can not be converted to time_t: \"" <<(*this)[i] << "\"" << endl;

                throw logic_error(buffer.str());
            }
        }
    }

    return(time);
}


/// Takes a string vector representing a date with year, month and day and returns a timestamp vector.
/// For instance, 2017/12/21 or 2017-12-21 to 1513814400.
/// @param delimiter Char between year, month and day(/, -, etc).

template <class T>
Vector<time_t> Vector<T>::yyyy_mm_dd_to_time(const char& delimiter) const
{
    const size_t this_size = this->size();

    Vector<time_t> time(this_size);

    // The original ran this loop under "#pragma omp parallel for", but both
    // stoi and the format check below can throw, and an exception escaping an
    // OpenMP region terminates the program. Run serially instead.
    for(size_t i = 0; i < this_size; i++)
    {
        const vector<string> date_elements = split_string((*this)[i], delimiter);

        if(date_elements.size() != 3)
        {
            ostringstream buffer;

            buffer << "OpenNN Exception: Vector Template.\n"
                   << "Vector<time_t> yyyy_mm_dd_to_time(const char&) const method.\n"
                   << "Date elements of row " << i << " must be 3: \"" <<(*this)[i] << "\"" << endl;

            throw logic_error(buffer.str());
        }

        // Year

        const int yyyy = stoi(date_elements[0]);

        // Month

        const int mm = stoi(date_elements[1]);

        // Month day

        const int dd = stoi(date_elements[2]);

        // Zero-initialize so mktime does not read indeterminate fields.
        struct tm time_info = {};
        time_info.tm_year = yyyy - 1900;
        time_info.tm_mon = mm - 1;
        time_info.tm_mday = dd;

        time[i] = mktime(&time_info);
    }

    return(time);
}


/// Takes a string vector representing a date with day, month and year
/// and returns a string vector representing a date with day of the year and year.
/// For instance, 02/10/2017 to 41/2017.
/// @param delimiter Char between day, month and year(/, -, etc).
template <class T> Matrix<T> Vector<T>::dd_mm_yyyy_to_dd_yyyy(const char& delimiter) const { const size_t this_size = this->size(); const Vector<time_t> time = this->dd_mm_yyyy_to_time(delimiter); Matrix<T> output(this_size,2); struct tm* date_info; for(size_t i = 0; i < this_size; i++) { date_info = gmtime(&time[i]); output(i, 0) = to_string(date_info->tm_yday + 1); output(i, 1) = to_string(date_info->tm_year + 1900); } return(output); } /// Takes a string vector representing a date with year, month and day /// and returns a string vector representing a date with day of the year and year. /// For instance, 02/10/2017 to 41/2017. /// @param delimiter Char between year, month and day(/, -, etc). template <class T> Matrix<T> Vector<T>::yyyy_mm_dd_to_dd_yyyy(const char& delimiter) const { const size_t this_size = this->size(); const Vector<time_t> time = this->yyyy_mm_dd_to_time(delimiter); Matrix<T> output(this_size,2); #pragma omp parallel for for(int i = 0; i < this_size; i++) { struct tm* date_info = gmtime(&time[static_cast<size_t>(i)]); output(i, 0) = to_string(date_info->tm_yday + 1); output(i, 1) = to_string(date_info->tm_year + 1900); } return(output); } template <class T> Matrix<T> Vector<T>::mm_yyyy_to_mm_yyyy(const char& delimiter) const { const size_t this_size = this->size(); const Vector<time_t> time = this->mm_yyyy_to_time(delimiter); Matrix<T> output(this_size,2); #pragma omp parallel for for(size_t i = 0; i < this_size; i++) { struct tm* date_info = gmtime(&time[i]); output(i, 0) = to_string(date_info->tm_yday + 1); output(i, 1) = to_string(date_info->tm_year + 1900); } return(output); } /// Takes a string vector representing a date with day, month and year /// and returns a string vector with the corresponding weekday. /// For instance, 2017/12/21 to 5. /// @param delimiter Char between year, month and day(/, -, etc). 
template <class T> Vector<T> Vector<T>::yyyy_mm_dd_to_weekday(const char& delimiter) const { const size_t this_size = this->size(); const Vector<time_t> time = yyyy_mm_dd_to_time(delimiter); Vector<T> output(this_size); #pragma omp parallel for for(int i = 0; i < this_size; i++) { struct tm* date_info = gmtime(&time[static_cast<size_t>(i)]); output[i] = to_string(date_info->tm_wday + 1); } return(output); } /// Takes a string vector representing a date with year, month and day /// and returns a string vector with the corresponding day of the year. /// For instance, 2017/02/10 to 41. /// @param delimiter Char between year, month and day(/, -, etc). template <class T> Vector<T> Vector<T>::yyyy_mm_dd_to_yearday(const char& delimiter) const { const size_t this_size = this->size(); const Vector<time_t> time = this->yyyy_mm_dd_to_time(delimiter); Vector<T> output(this_size); #pragma omp parallel for for(int i = 0; i < this_size; i++) { struct tm* date_info = gmtime(&time[static_cast<size_t>(i)]); output[i] = to_string(date_info->tm_yday + 1); } return(output); } template <class T> Vector<struct tm> Vector<T>::time_stamp_to_time_structure() const { const size_t this_size = this->size(); Vector<struct tm> new_vector(this_size); time_t time_stamp; struct tm time_stucture; for(size_t i = 0; i < this_size; i++) { time_stamp = (*this)[i]; time_stucture = *gmtime(&time_stamp); new_vector[i] = time_stucture; } return(new_vector); } template <class T> Vector< Vector<T> > Vector<T>::split(const size_t& n) const { // determine number of sub-vectors of size n const size_t batches_number = (this->size() - 1) / n + 1; // create array of vectors to store the sub-vectors Vector< Vector<T> > batches(batches_number); // each iteration of this loop process next set of n elements // and store it in a vector at k'th index in vec for (size_t k = 0; k < batches_number; ++k) { // get range for next set of n elements auto start_itr = std::next(this->cbegin(), k*n); auto end_itr = 
std::next(this->cbegin(), k*n + n); // allocate memory for the sub-vector batches[k].resize(n); // code to handle the last sub-vector as it might // contain less elements if (k*n + n > this->size()) { end_itr = this->cend(); batches[k].resize(this->size() - k*n); } // copy elements from the input range to the sub-vector std::copy(start_itr, end_itr, batches[k].begin()); } return batches; } /// Returns a row matrix with number of rows equal to one /// and number of columns equal to the size of this vector. template <class T> Matrix<T> Vector<T>::to_row_matrix() const { const size_t this_size = this->size(); Matrix<T> matrix(1, this_size); for(size_t i = 0; i < this_size; i++) { matrix(0, i) = (*this)[i]; } return(matrix); } /// Returns a column matrix with number of rows equal to the size of this vector /// and number of columns equal to one. template <class T> Matrix<T> Vector<T>::to_column_matrix() const { const size_t this_size = this->size(); Matrix<T> matrix(this_size, 1); for(size_t i = 0; i < this_size; i++) { matrix(i, 0) = (*this)[i]; } return(matrix); } /// This method takes a string representation of a vector and sets this vector /// to have size equal to the number of words and values equal to that words. /// @param str String to be parsed. template <class T> void Vector<T>::parse(const string &str) { if(str.empty()) { set(); } else { istringstream buffer(str); istream_iterator<string> first(buffer); istream_iterator<string> last; Vector<string> str_vector(first, last); const size_t new_size = str_vector.size(); if(new_size > 0) { this->resize(new_size); buffer.clear(); buffer.seekg(0, ios::beg); for(size_t i = 0; i < new_size; i++) { buffer >>(*this)[i]; } } } } /// Returns a string representation of this vector. /// @param separator Char between the elements(, -, /, etc). /// @param quotation Quotation char for the elements(", '). 
template <class T> string Vector<T>::vector_to_string(const char& separator, const char& quotation) const { ostringstream buffer; const size_t this_size = this->size(); if(this_size > 0) { buffer << quotation <<(*this)[0] << quotation; for(size_t i = 1; i < this_size; i++) { buffer << separator << quotation <<(*this)[i] << quotation; } } return(buffer.str()); } /// Returns a string representation of this vector. /// @param separator Char between the elements(, -, /, etc). template <class T> string Vector<T>::vector_to_string(const char& separator) const { ostringstream buffer; const size_t this_size = this->size(); if(this_size > 0) { buffer <<(*this)[0]; for(size_t i = 1; i < this_size; i++) { buffer << separator <<(*this)[i]; } } return(buffer.str()); } /// Returns a string representation of this vector. template <class T> string Vector<T>::vector_to_string() const { ostringstream buffer; const size_t this_size = this->size(); if(this_size > 0) { buffer <<(*this)[0]; for(size_t i = 1; i < this_size; i++) { buffer << ' ' <<(*this)[i]; } } return(buffer.str()); } template <class T> string Vector<T>::stack_vector_to_string() const { ostringstream buffer; const size_t this_size = this->size(); if(this_size > 0) { buffer <<(*this)[0]; for(size_t i = 1; i < this_size; i++) { buffer <<(*this)[i]; } } return(buffer.str()); } /// Returns a string representation of this vector which can be inserted in a text. 
template <class T> string Vector<T>::to_text(const char& separator) const { ostringstream buffer; const size_t this_size = this->size(); if(this_size > 0) { buffer <<(*this)[0]; for(size_t i = 1; i < this_size; i++) { buffer << separator <<(*this)[i]; } } return(buffer.str()); } template <class T> string Vector<T>::to_text(const string& separator) const { ostringstream buffer; const size_t this_size = this->size(); if(this_size > 0) { buffer <<(*this)[0]; for(size_t i = 1; i < this_size; i++) { buffer << separator <<(*this)[i]; } } return(buffer.str()); } /// This method retuns a vector of strings with size equal to the size of this /// vector and elements equal to string representations of the elements of this /// vector. template <class T> Vector<string> Vector<T>::write_string_vector(const size_t &precision) const { const size_t this_size = this->size(); Vector<string> string_vector(this_size); ostringstream buffer; for(size_t i = 0; i < this_size; i++) { buffer.str(""); buffer << setprecision(precision) <<(*this)[i]; string_vector[i] = buffer.str(); } return(string_vector); } /// Returns a matrix with given numbers of rows and columns and with the /// elements of this vector ordered by rows. /// The number of rows multiplied by the number of columns must be equal to the /// size of this vector. /// @param rows_number Number of rows in the new matrix. /// @param columns_number Number of columns in the new matrix. 
template <class T> Matrix<T> Vector<T>::to_matrix(const size_t &rows_number, const size_t &columns_number) const { // Control sentence(if debug) #ifdef __OPENNN_DEBUG__ const size_t this_size = this->size(); if(rows_number * columns_number != this_size) { ostringstream buffer; buffer << "OpenNN Exception: Vector Template.\n" << "Matrix<T> to_matrix(const size_t&, const size_t&) method.\n" << "The number of rows(" << rows_number << ") times the number of colums(" << columns_number << ") must be equal to the size of the vector(" << this_size << ").\n"; throw logic_error(buffer.str()); } #endif Matrix<T> matrix(rows_number, columns_number); for(size_t i = 0; i < this->size(); i++) { matrix[i] = (*this)[i]; } return(matrix); } template <class T> double Vector<T>::calculate_logistic_function(const Vector<double>& coefficients, const Vector<T>& x) const { const size_t coefficients_size = coefficients.size(); double exponential = coefficients[0]; for(size_t i = 1; i < coefficients_size; i++) { exponential += coefficients[i]*x[i-1]; } return(1.0/(1.0+exp(-exponential))); } template <class T> Vector<double> Vector<T>::calculate_logistic_error_gradient(const Vector<double>& coefficients, const Vector<T>& other) const { const size_t n = this->size(); const size_t other_size = this->size(); Vector<double> error_gradient(3, 0.0); size_t negatives_number = 0; size_t positives_number = 0; for(size_t i = 0; i < other_size; i++) { if(other[i] == 1) { positives_number++; } else if(other[i] == 0) { negatives_number++; } } double negatives_weight = 1.0; double positives_weight = 1.0; if(positives_number == 0) { positives_weight = 1.0; negatives_weight = 1.0; } else if(negatives_number == 0) { positives_weight = 1.0; negatives_weight = 1.0; negatives_number = 1; } else { positives_weight = static_cast<double>(negatives_number)/static_cast<double>(positives_number); } #pragma omp parallel for for(int i = 0; i < n; i++) { Vector<double> x(1); x[0] = (*this)[i]; double 
current_logistic_function = calculate_logistic_function(coefficients, x); const double gradient_multiply = exp(-(coefficients[0]+coefficients[1]*x[0]))*(other[i] - current_logistic_function)*current_logistic_function*current_logistic_function; Vector<double> this_error_gradient(3, 0.0); this_error_gradient[0] += (other[i]*positives_weight + (1-other[i])*negatives_weight)*(other[i] - current_logistic_function)*(other[i] - current_logistic_function)/2; this_error_gradient[1] -= (other[i]*positives_weight + (1-other[i])*negatives_weight)*gradient_multiply; this_error_gradient[2] -= (other[i]*positives_weight + (1-other[i])*negatives_weight)*x[0]*gradient_multiply; #pragma omp critical { error_gradient += this_error_gradient; } } return error_gradient/static_cast<double>(negatives_weight*negatives_number); } // Vector input operator /// This method re-writes the inputs operator >> for the Vector template. /// @param is Input stream. /// @param v Input vector. template <class T> istream &operator>>(istream &is, Vector<T> &v) { const size_t size = v.size(); for(size_t i = 0; i < size; i++) { is >> v[i]; } return(is); } // Vector output operator /// This method re-writes the output operator << for the Vector template. /// @param os Output stream. /// @param v Output vector. template <class T> ostream &operator<<(ostream &os, const Vector<T> &v) { const size_t this_size = v.size(); if(this_size > 0) { os << v[0]; const char space = ' '; for(size_t i = 1; i < this_size; i++) { os << space << v[i]; } } return(os); } // Vector of vectors output operator /// This method re-writes the output operator << for vectors of vectors. /// @param os Output stream. /// @param v Output vector of vectors. 
template <class T> ostream &operator<<(ostream &os, const Vector< Vector<T> > &v) { for(size_t i = 0; i < v.size(); i++) { os << "subvector_" << i << "\n" << v[i] << endl; } return(os); } // Vector of matrices output operator /// This method re-writes the output operator << for vectors of matrices. /// @param os Output stream. /// @param v Output vector of matrices. template <class T> ostream &operator<<(ostream &os, const Vector< Matrix<T> > &v) { for(size_t i = 0; i < v.size(); i++) { os << "submatrix_" << i << "\n" << v[i] << endl; } return(os); } /// Returns a random number chosen from a uniform distribution. /// @param minimum Minimum value. /// @param maximum Maximum value. template <class T> T calculate_random_uniform(const T &minimum, const T &maximum) { const T random = static_cast<T>(rand() /(RAND_MAX + 1.0)); const T random_uniform = minimum + (maximum - minimum) * random; return(random_uniform); } template <class T> string number_to_string(const T& value) { ostringstream ss; ss << value; return(ss.str()); } /// Returns a random number chosen from a normal distribution. /// @param mean Mean value of normal distribution. /// @param standard_deviation Standard deviation value of normal distribution. 
template <class T> T calculate_random_normal(const T &mean, const T &standard_deviation) { const double pi = 4.0 * atan(1.0); T random_uniform_1; do { random_uniform_1 = static_cast<T>(rand()) /(RAND_MAX + 1.0); } while(random_uniform_1 == 0.0); const T random_uniform_2 = static_cast<T>(rand()) /(RAND_MAX + 1.0); // Box-Muller transformation const T random_normal = mean + sqrt(-2.0 * log(random_uniform_1)) * sin(2.0 * pi * random_uniform_2) * standard_deviation; return(random_normal); } template <class T> string write_elapsed_time(const T& elapsed_time) { string elapsed_time_string; const size_t hours = static_cast<size_t>(elapsed_time/3600); size_t minutes = static_cast<size_t>(elapsed_time) - hours*3600; minutes = static_cast<size_t>(minutes/60); const size_t seconds = static_cast<size_t>(elapsed_time) - hours*3600 - minutes*60; if(hours != 0) { elapsed_time_string = to_string(hours) + ":"; } if(minutes < 10) { elapsed_time_string += "0"; } elapsed_time_string += to_string(minutes) + ":"; if(seconds < 10) { elapsed_time_string += "0"; } elapsed_time_string += to_string(seconds); return elapsed_time_string; } template <class T> string write_date_from_time_t(const T& date) { char date_char[20]; strftime(date_char, 20, "%d/%m/%Y", localtime(&date)); const string date_string(date_char); return date_string; } /// Splits the string into substrings wherever delimiter occurs, and returns the vector of those strings. /// If sep does not match anywhere in the string, split() returns a single-element list containing this string. /// @param source String to be splited. /// @param delimiter Separator between substrings. template <class T> vector<string> split_string(const T& source, const char& delimiter) { vector<string> elements; string element; istringstream is(source); while(getline(is, element, delimiter)) { elements.push_back(element); } return(elements); } /// Replaces a substring by another one in a given string. /// @param source String. 
/// @param find Substring to be replaced. /// @param replace Substring to be put. template <class T> void replace_substring(T& source, const T& find, const T& replace) { for(string::size_type i = 0;(i = source.find(find, i)) != string::npos;) { source.replace(i, find.length(), replace); i += replace.length(); } } /// This structure contains the simplest statistics for a set, variable, etc. /// It includes the minimum, maximum, mean and standard deviation variables. template <class T> struct Statistics { // Default constructor. Statistics(); // Values constructor. Statistics(const T &, const T &, const T &, const T &); /// Destructor. virtual ~Statistics(); // METHODS void set_minimum(const double &); void set_maximum(const double &); void set_mean(const double &); void set_standard_deviation(const double &); Vector<T> to_vector() const; void initialize_random(); bool has_minimum_minus_one_maximum_one(); bool has_mean_zero_standard_deviation_one(); void save(const string &file_name) const; /// Smallest value of a set, function, etc. T minimum = 0; /// Biggest value of a set, function, etc. T maximum = 0; /// Mean value of a set, function, etc. T mean = 0; /// Standard deviation value of a set, function, etc. T standard_deviation = 0; }; template <class T> Statistics<T>::Statistics() { minimum = static_cast<T>(-1.0); maximum = static_cast<T>(1.0); mean = static_cast<T>(0.0); standard_deviation = static_cast<T>(1.0); } /// Values constructor. template <class T> Statistics<T>::Statistics(const T &new_minimum, const T &new_maximum, const T &new_mean, const T &new_standard_deviation) { minimum = new_minimum; maximum = new_maximum; mean = new_mean; standard_deviation = new_standard_deviation; } /// Destructor. template <class T> Statistics<T>::~Statistics() {} /// Sets a new minimum value in the statistics structure. /// @param new_minimum Minimum value. 
template <class T> void Statistics<T>::set_minimum(const double &new_minimum) { minimum = new_minimum; } /// Sets a new maximum value in the statistics structure. /// @param new_maximum Maximum value. template <class T> void Statistics<T>::set_maximum(const double &new_maximum) { maximum = new_maximum; } /// Sets a new mean value in the statistics structure. /// @param new_mean Mean value. template <class T> void Statistics<T>::set_mean(const double &new_mean) { mean = new_mean; } /// Sets a new standard deviation value in the statistics structure. /// @param new_standard_deviation Standard deviation value. template <class T> void Statistics<T>::set_standard_deviation(const double &new_standard_deviation) { standard_deviation = new_standard_deviation; } /// Returns all the statistical parameters contained in a single vector. /// The size of that vector is seven. /// The elements correspond to the minimum, maximum, mean and standard deviation /// values respectively. template <class T> Vector<T> Statistics<T>::to_vector() const { Vector<T> statistics_vector(4); statistics_vector[0] = minimum; statistics_vector[1] = maximum; statistics_vector[2] = mean; statistics_vector[3] = standard_deviation; return(statistics_vector); } /// Initializes the statistics structure with a random /// minimum(between -1 and 1), maximum(between 0 and 1), /// mean(between -1 and 1), standard deviation(between 0 and 1). template <class T> void Statistics<T>::initialize_random() { minimum = calculate_random_uniform(-1.0, 0.0); maximum = calculate_random_uniform(0.0, 1.0); mean = calculate_random_uniform(-1.0, 1.0); standard_deviation = calculate_random_uniform(0.0, 1.0); } /// Returns true if the minimum value is -1 and the maximum value is +1, /// and false otherwise. 
template <class T>
bool Statistics<T>::has_minimum_minus_one_maximum_one()
{
    // Tolerance of 1e-6 around -1 and +1 to absorb floating-point noise.
    if(-1.000001 < minimum && minimum < -0.999999 &&
       0.999999 < maximum && maximum < 1.000001)
    {
        return(true);
    }
    else
    {
        return(false);
    }
}


/// Returns true if the mean value is 0 and the standard deviation value is 1,
/// and false otherwise.

template <class T>
bool Statistics<T>::has_mean_zero_standard_deviation_one()
{
    // Same 1e-6 tolerance as above.
    if(-0.000001 < mean && mean < 0.000001 &&
       0.999999 < standard_deviation && standard_deviation < 1.000001)
    {
        return(true);
    }
    else
    {
        return(false);
    }
}


/// Saves to a file the minimum, maximum, mean and standard deviation values
/// of the statistics structure.
/// @param file_name Name of statistics data file.

template <class T>
void Statistics<T>::save(const string &file_name) const
{
    ofstream file(file_name.c_str());

    if(!file.is_open())
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: Statistics template.\n"
               << "void save(const string&) const method.\n"
               << "Cannot open statistics data file.\n";

        throw logic_error(buffer.str());
    }

    // Write file

    file << "Minimum: " << minimum << endl
         << "Maximum: " << maximum << endl
         << "Mean: " << mean << endl
         << "Standard deviation: " << standard_deviation << endl;

    // Close file

    file.close();
}

// Statistics output operator

/// This method re-writes the output operator << for the Statistics template.
/// @param os Output stream.
/// @param statistics Statistics structure to print.

template <class T>
ostream &operator<<(ostream &os, const Statistics<T> &statistics)
{
    os << " Minimum: " << statistics.minimum << endl
       << " Maximum: " << statistics.maximum << endl
       << " Mean: " << statistics.mean << endl
       << " Standard deviation: " << statistics.standard_deviation << endl;

    return(os);
}


///
/// This template contains the data needed to represent a histogram.
///

template <class T>
struct Histogram
{
    /// Default constructor.

    explicit Histogram();

    /// Destructor.

    virtual ~Histogram();

    /// Bins number constructor.
    Histogram(const size_t &);

    /// Values constructor.

    Histogram(const Vector<T> &, const Vector<size_t> &);

    size_t get_bins_number() const;

    size_t count_empty_bins() const;

    size_t calculate_minimum_frequency() const;

    size_t calculate_maximum_frequency() const;

    size_t calculate_most_populated_bin() const;

    Vector<T> calculate_minimal_centers() const;

    Vector<T> calculate_maximal_centers() const;

    size_t calculate_bin(const T &) const;

    size_t calculate_frequency(const T &) const;

    /// Positions of the bins in the histogram.

    Vector<T> centers;

    /// Minimum of the bins in the histogram.

    Vector<T> minimums;

    /// Maximum of the bins in the histogram.

    Vector<T> maximums;

    /// Population of the bins in the histogram.

    Vector<size_t> frequencies;
};


/// Default constructor.

template <class T>
Histogram<T>::Histogram()
{}


/// Destructor.

template <class T>
Histogram<T>::~Histogram()
{}


/// Bins number constructor.
/// @param bins_number Number of bins in the histogram.
// NOTE(review): only centers and frequencies are resized here; minimums and
// maximums stay empty — presumably filled elsewhere; confirm with callers.

template <class T>
Histogram<T>::Histogram(const size_t &bins_number)
{
    centers.resize(bins_number);
    frequencies.resize(bins_number);
}


/// Values constructor.
/// @param new_centers Center values for the bins.
/// @param new_frequencies Number of variates in each bin.

template <class T>
Histogram<T>::Histogram(const Vector<T> &new_centers,
                        const Vector<size_t> &new_frequencies)
{
    centers = new_centers;
    frequencies = new_frequencies;
}


/// Returns the number of bins in the histogram.

template <class T>
size_t Histogram<T>::get_bins_number() const
{
    return(centers.size());
}


/// Returns the number of bins with zero variates.

template <class T>
size_t Histogram<T>::count_empty_bins() const
{
    return(frequencies.count_equal_to(0));
}


/// Returns the number of variates in the less populated bin.

template <class T>
size_t Histogram<T>::calculate_minimum_frequency() const
{
    return(frequencies.calculate_minimum());
}


/// Returns the number of variates in the most populated bin.
template <class T>
size_t Histogram<T>::calculate_maximum_frequency() const
{
    return(frequencies.calculate_maximum());
}


/// Returns the index of the most populated bin.

template <class T>
size_t Histogram<T>::calculate_most_populated_bin() const
{
    return(frequencies.calculate_maximal_index());
}


/// Returns a vector with the centers of the less populated bins.
/// Several bins can tie for the minimum frequency, hence a vector.

template <class T>
Vector<T> Histogram<T>::calculate_minimal_centers() const
{
    const size_t minimum_frequency = calculate_minimum_frequency();

    const Vector<size_t> minimal_indices =
        frequencies.calculate_equal_to_indices(minimum_frequency);

    return(centers.get_subvector(minimal_indices));
}


/// Returns a vector with the centers of the most populated bins.
/// Several bins can tie for the maximum frequency, hence a vector.

template <class T>
Vector<T> Histogram<T>::calculate_maximal_centers() const
{
    const size_t maximum_frequency = calculate_maximum_frequency();

    const Vector<size_t> maximal_indices =
        frequencies.calculate_equal_to_indices(maximum_frequency);

    return(centers.get_subvector(maximal_indices));
}


/// Returns the number of the bin to which a given value belongs to.
/// @param value Value for which we want to get the bin.
// Assumes at least 2 bins and equally-spaced centers (bin width is derived
// from the first/last center) — TODO confirm callers never pass 0 or 1 bins,
// since bins_number - 1 would then underflow/divide by zero.
template <class T>
size_t Histogram<T>::calculate_bin(const T &value) const
{
    const size_t bins_number = get_bins_number();

    const double minimum_center = centers[0];
    const double maximum_center = centers[bins_number - 1];

    // Uniform bin width inferred from the span of the centers.
    const double length = static_cast<double>(maximum_center - minimum_center)
                          /static_cast<double>(bins_number - 1.0);

    // Edges of bin 0: centered on centers[0], half a width to each side.
    double minimum_value = centers[0] - length / 2;
    double maximum_value = minimum_value + length;

    // Anything below the upper edge of bin 0 (including underflow values)
    // is assigned to bin 0.
    if(value < maximum_value)
    {
        return(0);
    }

    // Slide the [minimum_value, maximum_value) window across the middle bins.
    for(size_t j = 1; j < bins_number - 1; j++)
    {
        minimum_value = minimum_value + length;
        maximum_value = maximum_value + length;

        if(value >= minimum_value && value < maximum_value)
        {
            return(j);
        }
    }

    // Anything at or above the last edge (including overflow values) goes to
    // the last bin; reaching the else branch should be impossible.
    if(value >= maximum_value)
    {
        return(bins_number - 1);
    }
    else
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: Vector Template.\n"
               << "Vector<size_t> Histogram<T>::calculate_bin(const T&) const.\n"
               << "Unknown return value.\n";

        throw logic_error(buffer.str());
    }
}


/// Returns the frequency of the bin to which a given value belongs to.
/// @param value Value for which we want to get the frequency.

template <class T>
size_t Histogram<T>::calculate_frequency(const T &value) const
{
    const size_t bin_number = calculate_bin(value);

    const size_t frequency = frequencies[bin_number];

    return(frequency);
}

// Histogram output operator

/// This method re-writes the output operator << for the Histogram template.
/// @param os Output stream.
/// @param histogram Histogram structure to print.

template <class T>
ostream &operator<<(ostream &os, const Histogram<T> &histogram)
{
    os << "Histogram structure\n"
       << "Centers: " << histogram.centers << endl
       << "Frequencies: " << histogram.frequencies << endl;

    return(os);
}


///
/// This template defines the parameters of a linear regression analysis between
/// two sets x-y.
///

template <class T>
struct LinearRegressionParameters
{
    /// Y-intercept of the linear regression.

    double intercept;

    /// Slope of the linear regression.

    double slope;

    /// Correlation coefficient(R-value) of the linear regression.
    double correlation;

    void initialize_random();
};


/// Initializes the linear regression parameters structure with a random
/// intercept (raw rand(), so in [0, RAND_MAX] — not scaled like the others),
/// slope (between -1 and 1) and correlation (between -1 and 1).
// NOTE(review): rand() for the intercept is inconsistent with the
// calculate_random_uniform(...) pattern used everywhere else in this file;
// confirm whether the intercept was meant to be uniform in [-1, 1].

template <class T>
void LinearRegressionParameters<T>::initialize_random()
{
    intercept = rand();
    slope = calculate_random_uniform(-1.0, 1.0);
    correlation = calculate_random_uniform(-1.0, 1.0);
}


/// Output operator: prints the intercept, slope and correlation.

template <class T>
ostream &
operator<<(ostream &os,
           const LinearRegressionParameters<T> &linear_regression_parameters)
{
    os << "Linear regression parameters:\n"
       << "Intercept: " << linear_regression_parameters.intercept << "\n"
       << "Slope: " << linear_regression_parameters.slope << "\n"
       << "Correlation: " << linear_regression_parameters.correlation << endl;

    return(os);
}


/// This template defines the parameters of a logistic regression analysis
/// between two sets x-y.

template <class T>
struct LogisticRegressionParameters
{
    /// Independent coefficient of the logistic function.

    double a;

    /// x coefficient of the logistic function.

    double b;

    /// Correlation coefficient of the logistic regression.
double correlation; }; template <class T> ostream &operator << ( ostream &os, const LogisticRegressionParameters<T> &logistic_regression_parameters) { os << "Logistic regression parameters:\n" << "a: " << logistic_regression_parameters.a << "\n" << "b: " << logistic_regression_parameters.b << "\n" << "Correlation: " << logistic_regression_parameters.correlation << endl; return(os); } template<class T> Vector<T> sine(const Vector<T>& x) { size_t n = x.size(); Vector<T> y(n); for(size_t i = 0; i < n; i++) { y[i] = sin(x[i]); } return y; } template<class T> Vector<T> cosine(const Vector<T>& x) { size_t n = x.size(); Vector<T> y(n); for(size_t i = 0; i < n; i++) { y[i] = cosine(x[i]); } return y; } template <class T> struct KMeansResults { Vector< Vector<size_t> > clusters; }; template<class T> Vector<T> hyperbolic_tangent(const Vector<T>& x) { const size_t n = x.size(); Vector<T> y(n); for(size_t i = 0; i < n; i++) { y[i] = tanh(x[i]); } return y; } template<class T> Vector<T> hyperbolic_tangent_derivatives(const Vector<T>& x) { const size_t n = x.size(); Vector<T> y(n); for(size_t i = 0; i < n; i++) { const double hyperbolic_tangent = tanh(x[i]); y[i] = 1.0 - hyperbolic_tangent*hyperbolic_tangent; } return y; } template<class T> Vector<T> linear(const Vector<T>& x) { return x; } template<class T> Vector<T> linear_derivatives(const Vector<T>& x) { const size_t n = x.size(); Vector<T> y(n, 1); return y; } template<class T> Vector<T> linear_second_derivatives(const Vector<T>& x) { const Vector<double> y(x.size(),0); return y; } template<class T> Vector<T> hyperbolic_tangent_second_derivatives(const Vector<T>& x) { const size_t n = x.size(); Vector<T> y(n); for(size_t i = 0; i < n; i++) { const double hyperbolic_tangent = tanh(x[i]); y[i] = -2*hyperbolic_tangent*(1 - hyperbolic_tangent * hyperbolic_tangent); } return y; } template<class T> Vector<T> logistic(const Vector<T>& x) { const size_t n = x.size(); Vector<T> y(n); for(size_t i = 0; i < n; i++) { y[i] = 1.0 / (1.0 
+ exp(-x[i])); } return y; } template<class T> Vector<T> logistic_derivatives(const Vector<T>& x) { const size_t n = x.size(); Vector<T> y(n); for(size_t i = 0; i < n; i++) { const double exponential = exp(-x[i]); y[i] = exponential / ((1.0 + exponential)*(1.0 + exponential)); } return y; } template<class T> Vector<T> logistic_second_derivatives(const Vector<T>& x) { const size_t n = x.size(); Vector<T> y(n); for(size_t i = 0; i < n; i++) { const double exponential = exp(-x[i]); y[i] = (exponential*exponential - exponential) / ((1.0 + exponential)*(1.0 + exponential)*(1.0 + exponential)); } return y; } template<class T> Vector<T> threshold(const Vector<T>& x) { const size_t n = x.size(); Vector<T> y(n); for(size_t i = 0; i < n; i++) { x[i] < 0.0 ? y[i] = 0.0 : y[i] = 1.0; // if(x[i] < 0) // { // y[i] = 0.0; // } // else // { // y[i] = 1.0; // } } return y; } template<class T> Vector<T> threshold_derivatives(const Vector<T>& x) { const size_t n = x.size(); Vector<T> y(n); for(size_t i = 0; i < n; i++) { if(x[i] < 0 || x[i] > 0) { y[i] = 0.0; } else { ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "Matrix<T> threshold_derivatives(const Matrix<T>&).\n" << "Derivate does not exist for x equal to 0.\n"; throw logic_error(buffer.str()); } } return y; } template<class T> Vector<T> threshold_second_derivatives(const Vector<T>& x) { const size_t n = x.size(); Vector<T> y(n); for(size_t i = 0; i < n; i++) { if(x[i] < 0 || x[i] > 0) { y[i] = 0.0; } else { ostringstream buffer; buffer << "OpenNN Exception: Matrix Template.\n" << "Matrix<T> threshold_derivatives(const Matrix<T>&).\n" << "Derivate does not exist for x equal to 0.\n"; throw logic_error(buffer.str()); } } return y; } template<class T> Vector<T> symmetric_threshold(const Vector<T>& x) { const size_t n = x.size(); Vector<T> y(n); for(size_t i = 0; i < n; i++) { if(x[i] < 0) { y[i] = -1.0; } else { y[i] = 1.0; } } return y; } template<class T> Vector<T> rectified_linear(const Vector<T>& x) 
{
    // Fixed: removed a leftover debug print (cout << "HERE" << endl;) that
    // spammed stdout on every call of this activation.
    const size_t n = x.size();

    Vector<T> y(n);

    for(size_t i = 0; i < n; i++)
    {
        y[i] = (x[i] < 0.0) ? 0.0 : x[i];
    }

    return y;
}


/// Derivative of ReLU: 0 for x <= 0, 1 otherwise.

template<class T>
Vector<T> rectified_linear_derivatives(const Vector<T>& x)
{
    const size_t n = x.size();

    Vector<T> derivatives(n);

    for(size_t i = 0; i < n; i++)
    {
        x[i] <= 0.0 ? derivatives[i] = 0.0 : derivatives[i] = 1.0;
    }

    return derivatives;
}


/// Second derivative of ReLU: identically zero.

template<class T>
Vector<T> rectified_linear_second_derivatives(const Vector<T>& x)
{
    const size_t n = x.size();

    const Vector<T> second_derivatives(n, 0.0);

    return second_derivatives;
}


/// Scaled exponential linear unit (SELU):
/// lambda*alpha*(e^x - 1) for x < 0, lambda*x otherwise,
/// with the standard SELU constants lambda = 1.0507, alpha = 1.67326.

template<class T>
Vector<T> scaled_exponential_linear(const Vector<T>& x)
{
    const size_t n = x.size();

    const double lambda = 1.0507;
    const double alpha = 1.67326;

    Vector<T> y(n);

    for(size_t i = 0; i < n; i++)
    {
        x[i] < 0.0 ? y[i] = lambda * alpha * (exp(x[i]) - 1) : y[i] = lambda * x[i];
    }

    return y;
}


/// Derivative of SELU: lambda*alpha*e^x for x < 0, lambda otherwise.

template<class T>
Vector<T> scaled_exponential_linear_derivatives(const Vector<T>& x)
{
    const size_t n = x.size();

    const double lambda = 1.0507;
    const double alpha = 1.67326;

    Vector<T> derivatives(n);

    for(size_t i = 0; i < n; i++)
    {
        x[i] < 0.0 ? derivatives[i] = lambda * alpha * exp(x[i]) : derivatives[i] = lambda;
    }

    return derivatives;
}


/// Second derivative of SELU: lambda*alpha*e^x for x < 0, 0 otherwise.

template<class T>
Vector<T> scaled_exponential_linear_second_derivatives(const Vector<T>& x)
{
    const size_t n = x.size();

    const double lambda = 1.0507;
    const double alpha = 1.67326;

    Vector<T> second_derivatives(n);

    for(size_t i = 0; i < n; i++)
    {
        x[i] < 0.0 ?
second_derivatives[i] = lambda * alpha * exp(x[i]) : second_derivatives[i] = 0.0;
    }

    return second_derivatives;
}


/// Softplus activation: log(1 + e^x), element-wise.

template<class T>
Vector<T> soft_plus(const Vector<T>& x)
{
    const size_t n = x.size();

    Vector<T> y(n);

    for(size_t i = 0; i < n; i++)
    {
        y[i] = log(1 + exp(x[i]));
    }

    return y;
}


/// Derivative of softplus: the logistic function 1 / (1 + e^-x).

template<class T>
Vector<T> soft_plus_derivatives(const Vector<T>& x)
{
    const size_t n = x.size();

    Vector<T> derivatives(n);

    for(size_t i = 0; i < n; i++)
    {
        derivatives[i] = 1/(1 + exp(-x[i]));
    }

    return derivatives;
}


/// Second derivative of softplus: e^-x / (1 + e^-x)^2.

template<class T>
Vector<T> soft_plus_second_derivatives(const Vector<T>& x)
{
    const size_t n = x.size();

    Vector<T> second_derivatives(n);

    for(size_t i = 0; i < n; i++)
    {
        // Fixed: the original wrote to second_derivatives[n] (one past the
        // end — out-of-bounds write on every iteration) instead of [i].
        second_derivatives[i] = exp(-x[i]) / pow((1 + exp(-x[i])), 2);
    }

    return second_derivatives;
}


/// Softsign activation: x / (1 + |x|), written as two signed branches.

template<class T>
Vector<T> soft_sign(const Vector<T>& x)
{
    const size_t n = x.size();

    Vector<T> y(n);

    for(size_t i = 0; i < n; i++)
    {
        x[i] < 0.0 ? y[i] = x[i] / (1 - x[i]) : y[i] = x[i] / (1 + x[i]);
    }

    return y;
}


/// Derivative of softsign: 1 / (1 + |x|)^2.

template<class T>
Vector<T> soft_sign_derivatives(const Vector<T>& x)
{
    const size_t n = x.size();

    Vector<T> derivatives(n);

    for(size_t i = 0; i < n; i++)
    {
        x[i] < 0.0 ? derivatives[i] = 1 / pow((1 - x[i]), 2)
                   : derivatives[i] = 1 / pow((1 + x[i]), 2);
    }

    return derivatives;
}


/// Second derivative of softsign: -2x / (1 + |x|)^3.

template<class T>
Vector<T> soft_sign_second_derivatives(const Vector<T>& x)
{
    const size_t n = x.size();

    Vector<T> second_derivatives(n);

    for(size_t i = 0; i < n; i++)
    {
        x[i] < 0.0 ?
second_derivatives[i] = -(2 * x[i]) / pow((1 - x[i]), 3) : second_derivatives[i] = -(2 * x[i]) / pow((1 + x[i]), 3);
    }

    return second_derivatives;
}


/// Hard sigmoid activation: a piecewise-linear approximation of the logistic
/// function — 0 below -2.5, 1 above 2.5, and 0.2*x + 0.5 in between.

template<class T>
Vector<T> hard_sigmoid(const Vector<T>& x)
{
    const size_t n = x.size();

    Vector<T> y(n);

    for(size_t i = 0; i < n; i++)
    {
        // Fixed: all three branches wrote to y[n] (one past the end — an
        // out-of-bounds write, leaving y uninitialized) instead of y[i].
        if(x[i] < -2.5)
        {
            y[i] = 0;
        }
        else if(x[i] > 2.5)
        {
            y[i] = 1;
        }
        else
        {
            y[i] = 0.2 * x[i] + 0.5;
        }
    }

    return y;
}


/// Derivative of the hard sigmoid: 0.2 in the linear region, 0 outside.

template<class T>
Vector<T> hard_sigmoid_derivatives(const Vector<T>& x)
{
    const size_t n = x.size();

    Vector<T> derivatives(n);

    for(size_t i = 0; i < n; i++)
    {
        x[i] < -2.5 || x[i] > 2.5 ? derivatives[i] = 0.0 : derivatives[i] = 0.2;
    }

    return derivatives;
}


/// Second derivative of the hard sigmoid: identically zero.

template<class T>
Vector<T> hard_sigmoid_second_derivatives(const Vector<T>& x)
{
    const size_t n = x.size();

    Vector<T> second_derivatives(n, 0.0);

    return second_derivatives;
}


/// Exponential linear unit (ELU): alpha*(e^x - 1) for x < 0, x otherwise.

template<class T>
Vector<T> exponential_linear(const Vector<T>& x)
{
    const size_t n = x.size();

    Vector<T> y(n);

    const double alpha = 1.0;

    for(size_t i = 0; i < n; i++)
    {
        x[i] < 0.0 ? y[i] = alpha * (exp(x[i])- 1) : y[i] = x[i];
    }

    return y;
}


/// Derivative of ELU: alpha*e^x for x < 0, 1 otherwise.

template<class T>
Vector<T> exponential_linear_derivatives(const Vector<T>& x)
{
    const size_t n = x.size();

    Vector<T> derivatives(n);

    const double alpha = 1.0;

    for(size_t i = 0; i < n; i++)
    {
        x[i] < 0.0 ? derivatives[i] = alpha * exp(x[i]) : derivatives[i] = 1.0;
    }

    return derivatives;
}


/// Second derivative of ELU: alpha*e^x for x < 0, 0 otherwise.

template<class T>
Vector<T> exponential_linear_second_derivatives(const Vector<T>& x)
{
    const size_t n = x.size();

    Vector<T> second_derivatives(n);

    const double alpha = 1.0;

    for(size_t i = 0; i < n; i++)
    {
        x[i] < 0.0 ?
second_derivatives[i] = alpha * exp(x[i]) : second_derivatives[i] = 0.0;
    }

    return second_derivatives;
}


/// Derivative of the sign activation: 0 everywhere except at x = 0,
/// where it is undefined and an exception is thrown.
// NOTE(review): the exception text below says "Matrix Template" /
// "threshold_derivatives" — copy-paste residue from another function; the
// strings are left untouched here.

template<class T>
Vector<T> symmetric_threshold_derivatives(const Vector<T>& x)
{
    const size_t n = x.size();

    Vector<T> y(n);

    for(size_t i = 0; i < n; i++)
    {
        if(x[i] == 0)
        {
            ostringstream buffer;

            buffer << "OpenNN Exception: Matrix Template.\n"
                   << "Matrix<T> threshold_derivatives(const Matrix<T>&).\n"
                   << "Derivate does not exist for x equal to 0.\n";

            throw logic_error(buffer.str());
        }
        else
        {
            y[i] = 0.0;
        }
    }

    return y;
}


/// Second derivative of the sign activation: 0 everywhere except at x = 0,
/// where it is undefined and an exception is thrown.
// NOTE(review): same copy-pasted exception text as above; left untouched.

template<class T>
Vector<T> symmetric_threshold_second_derivatives(const Vector<T>& x)
{
    const size_t n = x.size();

    Vector<T> y(n);

    for(size_t i = 0; i < n; i++)
    {
        if(x[i] == 0)
        {
            ostringstream buffer;

            buffer << "OpenNN Exception: Matrix Template.\n"
                   << "Matrix<T> threshold_derivatives(const Matrix<T>&).\n"
                   << "Derivate does not exist for x equal to 0.\n";

            throw logic_error(buffer.str());
        }
        else
        {
            y[i] = 0.0;
        }
    }

    return y;
}

} // end namespace OpenNN

#endif

// OpenNN: Open Neural Networks Library.
// Copyright(C) 2005-2018 Artificial Intelligence Techniques, SL.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
uts.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /**********************************************************************************************/ /* * Copyright (c) 2007 The Unbalanced Tree Search (UTS) Project Team: * ----------------------------------------------------------------- * * This file is part of the unbalanced tree search benchmark. This * project is licensed under the MIT Open Source license. See the LICENSE * file for copyright and licensing information. * * UTS is a collaborative project between researchers at the University of * Maryland, the University of North Carolina at Chapel Hill, and the Ohio * State University. * * University of Maryland: * Chau-Wen Tseng(1) <tseng at cs.umd.edu> * * University of North Carolina, Chapel Hill: * Jun Huan <huan, * Jinze Liu liu, * Stephen Olivier olivier, * Jan Prins* prins at cs.umd.edu> * * The Ohio State University: * James Dinan <dinan, * Gerald Sabin sabin, * P. Sadayappan* saday at cse.ohio-state.edu> * * Supercomputing Research Center * D. 
Pryor * * (1) - indicates project PI * * UTS Recursive Depth-First Search (DFS) version developed by James Dinan * * Adapted for OpenMP 3.0 Task-based version by Stephen Olivier * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <omp.h> #include <sys/time.h> #include "app-desc.h" #include "bots.h" #include "uts.h" /*********************************************************** * Global state * ***********************************************************/ unsigned long long nLeaves = 0; int maxTreeDepth = 0; /*********************************************************** * Tree generation strategy is controlled via various * * parameters set from the command line. The parameters * * and their default values are given below. * * Trees are generated using a Galton-Watson process, in * * which the branching factor of each node is a random * * variable. 
 *                                                         *
 *                                                         *
 *  The random variable follows a binomial distribution.   *
 ***********************************************************/
double b_0   = 4.0; // default branching factor at the root
int   rootId = 0;   // default seed for RNG state at root

/***********************************************************
 *  The branching factor at the root is specified by b_0.
 *  The branching factor below the root follows an
 *     identical binomial distribution at all nodes.
 *  A node has m children with prob q, or no children with
 *     prob (1-q).  The expected branching factor is q * m.
 *
 *  Default parameter values
 ***********************************************************/
int    nonLeafBF   = 4;            // m
double nonLeafProb = 15.0 / 64.0;  // q

/***********************************************************
 * compute granularity - number of rng evaluations per
 * tree node
 ***********************************************************/
int computeGranularity = 1;

/***********************************************************
 * expected results for execution
 ***********************************************************/
unsigned long long exp_tree_size = 0;
int        exp_tree_depth = 0;
unsigned long long exp_num_leaves = 0;

/***********************************************************
 *  FUNCTIONS                                              *
 ***********************************************************/

// Interpret 32 bit positive integer as value on [0,1)
double rng_toProb(int n)
{
  if (n < 0) {
    printf("*** toProb: rand n = %d out of range\n",n);
  }
  // Negative inputs clamp to 0.0; divisor is 2^31.
  return ((n<0)? 0.0 : ((double) n)/2147483648.0);
}

// Initializes the root node: height 0, child count "not yet determined",
// and the RNG state seeded from rootId.
void uts_initRoot(Node * root)
{
  root->height = 0;
  root->numChildren = -1;      // means not yet determined
  rng_init(root->state.state, rootId);

  bots_message("Root node at %p\n", root);
}

// Draws the child count for a non-root node: nonLeafBF children with
// probability nonLeafProb, zero otherwise (binomial branching).
int uts_numChildren_bin(Node * parent)
{
  // distribution is identical everywhere below root
  int    v = rng_rand(parent->state.state);
  double d = rng_toProb(v);

  return (d < nonLeafProb) ?
nonLeafBF : 0;
}

// Returns the number of children for a node: floor(b_0) at the root,
// a binomial draw below it — both capped to a maximum.
int uts_numChildren(Node *parent)
{
  int numChildren = 0;

  /* Determine the number of children */
  if (parent->height == 0) numChildren = (int) floor(b_0);
  else numChildren = uts_numChildren_bin(parent);

  // limit number of children
  // only a BIN root can have more than MAXNUMCHILDREN
  if (parent->height == 0) {
    int rootBF = (int) ceil(b_0);
    if (numChildren > rootBF) {
      bots_debug("*** Number of children of root truncated from %d to %d\n",
             numChildren, rootBF);
      numChildren = rootBF;
    }
  }
  else {
    if (numChildren > MAXNUMCHILDREN) {
      bots_debug("*** Number of children truncated from %d to %d\n",
             numChildren, MAXNUMCHILDREN);
      numChildren = MAXNUMCHILDREN;
    }
  }

  return numChildren;
}

/***********************************************************
 * Recursive depth-first implementation                    *
 ***********************************************************/

// Entry point: spawns a single root task inside a parallel region.
// The implicit barrier at the end of the parallel region guarantees the
// task tree has completed before num_nodes is returned.
unsigned long long parallel_uts ( Node *root )
{
   unsigned long long num_nodes = 0 ;
   root->numChildren = uts_numChildren(root);

   bots_message("Computing Unbalance Tree Search algorithm ");

   #pragma omp parallel
      #pragma omp single nowait //consider removing nowait and see how it affects performance
      #pragma omp task
          num_nodes = parTreeSearch( 0, root, root->numChildren );

   bots_message(" completed!");

   return num_nodes;
}

// Recursively counts the subtree rooted at parent: spawns one task per
// child (results gathered in partialCount) and sums after the taskwait.
// Note the child Node array n[] lives on this task's stack; the taskwait
// below keeps it alive until all child tasks finish.
unsigned long long parTreeSearch(int depth, Node *parent, int numChildren)
{
  Node n[numChildren], *nodePtr;
  int i, j;
  unsigned long long subtreesize = 1, partialCount[numChildren];

  //this is to prevent use of unnecessary taskwaits which break the profiler assertions
  //if (numChildren == 0){
     //printf("[uts bots parTreeSearch] numChildren == 0\n");
  //   return subtreesize;
  //}

  // Recurse on the children
  for (i = 0; i < numChildren; i++) {
     nodePtr = &n[i];

     nodePtr->height = parent->height + 1;

     // The following line is the work (one or more SHA-1 ops)
     for (j = 0; j < computeGranularity; j++) {
        rng_spawn(parent->state.state, nodePtr->state.state, i);
     }

     nodePtr->numChildren = uts_numChildren(nodePtr);

     #pragma omp task
firstprivate(i, nodePtr) shared(partialCount) partialCount[i] = parTreeSearch(depth+1, nodePtr, nodePtr->numChildren); } #pragma omp taskwait for (i = 0; i < numChildren; i++) { subtreesize += partialCount[i]; } return subtreesize; } void uts_read_file ( char *filename ) { FILE *fin; if ((fin = fopen(filename, "r")) == NULL) { bots_message("Could not open input file (%s)\n", filename); exit (-1); } fscanf(fin,"%lf %lf %d %d %d %llu %d %llu", &b_0, &nonLeafProb, &nonLeafBF, &rootId, &computeGranularity, &exp_tree_size, &exp_tree_depth, &exp_num_leaves ); fclose(fin); computeGranularity = max(1,computeGranularity); // Printing input data bots_message("\n"); bots_message("Root branching factor = %f\n", b_0); bots_message("Root seed (0 <= 2^31) = %d\n", rootId); bots_message("Probability of non-leaf node = %f\n", nonLeafProb); bots_message("Number of children for non-leaf node = %d\n", nonLeafBF); bots_message("E(n) = %f\n", (double) ( nonLeafProb * nonLeafBF ) ); bots_message("E(s) = %f\n", (double) ( 1.0 / (1.0 - nonLeafProb * nonLeafBF) ) ); bots_message("Compute granularity = %d\n", computeGranularity); bots_message("Random number generator = "); rng_showtype(); } void uts_show_stats( void ) { int nPes = atoi(bots_resources); int chunkSize = 0; bots_message("\n"); bots_message("Tree size = %llu\n", (unsigned long long) bots_number_of_tasks ); bots_message("Maximum tree depth = %d\n", maxTreeDepth ); bots_message("Chunk size = %d\n", chunkSize ); bots_message("Number of leaves = %llu (%.2f%%)\n", nLeaves, nLeaves/(float)bots_number_of_tasks*100.0 ); bots_message("Number of PE's = %.4d threads\n", nPes ); bots_message("Wallclock time = %.3f sec\n", bots_time_program ); bots_message("Overall performance = %.0f nodes/sec\n", (bots_number_of_tasks / bots_time_program) ); bots_message("Performance per PE = %.0f nodes/sec\n", (bots_number_of_tasks / bots_time_program / nPes) ); } int uts_check_result ( void ) { int answer = BOTS_RESULT_SUCCESSFUL; if ( 
bots_number_of_tasks != exp_tree_size ) { answer = BOTS_RESULT_UNSUCCESSFUL; bots_message("Incorrect tree size result (%llu instead of %llu).\n", bots_number_of_tasks, exp_tree_size); } return answer; }
no_option.c
// RUN: %clang_cc1 -verify -o - %s // RUN: %clang_cc1 -verify -o - %s // SIMD-ONLY0-NOT: {{__kmpc|__tgt}} // expected-no-diagnostics int a; #pragma omp threadprivate(a, b) #pragma omp parallel
convolution_5x5.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv5x5s1_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float *kernel = _kernel; const float *bias = _bias; #pragma omp parallel for for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); for (int q = 0; q < inch; q++) { float *outptr = out; float *outptr2 = outptr + outw; const float *img0 = bottom_blob.channel(q); const float *kernel0 = kernel + p * inch * 25 + q * 25; const float *r0 = img0; const float *r1 = img0 + w; const float *r2 = img0 + w * 2; const float *r3 = img0 + w * 3; const float *r4 = img0 + w * 4; const float *r5 = img0 + w * 5; const float *k0 = kernel0; const float *k1 = kernel0 + 5; const float *k2 = kernel0 + 10; const float *k3 = kernel0 + 15; const float *k4 = kernel0 + 20; int i = 0; for (; i + 1 < outh; i += 2) { int remain = outw; for (; remain > 0; remain--) { float sum = 0; float sum2 = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r1[3] * k0[3]; sum2 += r1[4] * k0[4]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r2[3] * k1[3]; sum2 += r2[4] * k1[4]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; sum2 += r3[3] * k2[3]; sum2 += r3[4] * k2[4]; sum2 += r4[0] * k3[0]; sum2 += r4[1] * k3[1]; sum2 += r4[2] * k3[2]; sum2 += r4[3] * k3[3]; sum2 += r4[4] * k3[4]; sum2 += r5[0] * k4[0]; sum2 += r5[1] * k4[1]; sum2 += r5[2] * k4[2]; sum2 += r5[3] * k4[3]; sum2 += r5[4] * k4[4]; *outptr += sum; *outptr2 += sum2; r0++; r1++; r2++; r3++; r4++; r5++; outptr++; outptr2++; } r0 += 4 + w; r1 += 4 + w; r2 += 4 + w; r3 += 4 + w; r4 += 4 + w; r5 += 4 + w; outptr += outw; outptr2 += 
outw; } for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { float sum = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; *outptr += sum; r0++; r1++; r2++; r3++; r4++; outptr++; } r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; } } } }
OmpForBeginLink.c
/*
 * Minimal OpenMP work-sharing exercise: a bare (orphaned) `omp for`
 * construct over ten iterations with an empty body.  The loop control
 * variable is declared outside the loop, so it is predetermined private
 * for the work-sharing region.
 */
int main(void)
{
  int iteration;

#pragma omp for
  for (iteration = 0; iteration < 10; iteration++)
    {
      /* intentionally empty */
    }
  return 0;
}
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/memory_.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resample.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImage() returns the second image composited onto the first % at the specified offset, using the specified composite method. 
%
%  The format of the CompositeImage method is:
%
%      MagickBooleanType CompositeImage(Image *image,
%        const Image *source_image,const CompositeOperator compose,
%        const MagickBooleanType clip_to_self,const ssize_t x_offset,
%        const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the canvas image, modified by the composition
%
%    o source_image: the source image.
%
%    o compose: This operator affects how the composite is applied to
%      the image.  The operators and how they are utilized are listed here
%      http://www.w3.org/TR/SVG12/#compositing.
%
%    o clip_to_self: set to MagickTrue to limit composition to area composed.
%
%    o x_offset: the column offset of the composited image.
%
%    o y_offset: the row offset of the composited image.
%
%  Extra Controls from Image meta-data in 'image' (artifacts)
%
%    o "compose:args"
%        A string containing extra numerical arguments for specific compose
%        methods, generally expressed as a 'geometry' or a comma separated
%        list of numbers.
%
%        Compose methods needing such arguments include "BlendCompositeOp"
%        and "DisplaceCompositeOp".
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
   Composition based on the SVG specification:

   A Composition is defined by...
      Color Function :  f(Sc,Dc)  where Sc and Dc are the normalized colors
      Blending areas :  X = 1     for area of overlap, ie: f(Sc,Dc)
                        Y = 1     for source preserved
                        Z = 1     for canvas preserved

   Conversion to transparency (then optimized)
      Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
      Da'  = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)

   Where...
      Sca = Sc*Sa     normalized Source color divided by Source alpha
      Dca = Dc*Da     normalized Dest color divided by Dest alpha
      Dc' = Dca'/Da'  the desired color value for this channel.

   Da' appears in the following formula as 'gamma', the resulting alpha
   value.

   Most functions use a blending mode of over (X=1,Y=1,Z=1) this results in
   the following optimizations...
      gamma = Sa+Da-Sa*Da;
      gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
      opacity = QuantumScale*alpha*beta;  // over blend, optimized 1-Gamma

   The above SVG definitions also define that Mathematical Composition
   methods should use a 'Over' blending mode for Alpha Channel.
   It however was not applied for composition modes of 'Plus', 'Minus',
   the modulus versions of 'Add' and 'Subtract'.

   Mathematical operator changes to be applied from IM v6.7...

    1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
       'ModulusAdd' and 'ModulusSubtract' for clarity.

    2) All mathematical compositions work as per the SVG specification with
       regard to blending.  This now includes 'ModulusAdd' and
       'ModulusSubtract'.

    3) When the special channel flag 'sync' (synchronize channel updates) is
       turned off (enabled by default) then mathematical compositions are
       only performed on the channels specified, and are applied
       independently of each other.  In other words the mathematics is
       performed as 'pure' mathematical operations, rather than as image
       operations.
*/

static void HCLComposite(const MagickRealType hue,const MagickRealType chroma,
  const MagickRealType luma,MagickRealType *red,MagickRealType *green,
  MagickRealType *blue)
{
  MagickRealType
    b,
    c,
    g,
    h,
    m,
    r,
    x;

  /*
    Convert HCL to RGB colorspace.
*/ assert(red != (MagickRealType *) NULL); assert(green != (MagickRealType *) NULL); assert(blue != (MagickRealType *) NULL); h=6.0*hue; c=chroma; x=c*(1.0-fabs(fmod(h,2.0)-1.0)); r=0.0; g=0.0; b=0.0; if ((0.0 <= h) && (h < 1.0)) { r=c; g=x; } else if ((1.0 <= h) && (h < 2.0)) { r=x; g=c; } else if ((2.0 <= h) && (h < 3.0)) { g=c; b=x; } else if ((3.0 <= h) && (h < 4.0)) { g=x; b=c; } else if ((4.0 <= h) && (h < 5.0)) { r=x; b=c; } else if ((5.0 <= h) && (h < 6.0)) { r=c; b=x; } m=luma-(0.298839*r+0.586811*g+0.114350*b); *red=QuantumRange*(r+m); *green=QuantumRange*(g+m); *blue=QuantumRange*(b+m); } static void CompositeHCL(const MagickRealType red,const MagickRealType green, const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma, MagickRealType *luma) { MagickRealType b, c, g, h, max, r; /* Convert RGB to HCL colorspace. */ assert(hue != (MagickRealType *) NULL); assert(chroma != (MagickRealType *) NULL); assert(luma != (MagickRealType *) NULL); r=red; g=green; b=blue; max=MagickMax(r,MagickMax(g,b)); c=max-(MagickRealType) MagickMin(r,MagickMin(g,b)); h=0.0; if (c == 0) h=0.0; else if (red == max) h=fmod((g-b)/c+6.0,6.0); else if (green == max) h=((b-r)/c)+2.0; else if (blue == max) h=((r-g)/c)+4.0; *hue=(h/6.0); *chroma=QuantumScale*c; *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b); } static MagickBooleanType CompositeOverImage(Image *image, const Image *source_image,const MagickBooleanType clip_to_self, const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception) { #define CompositeImageTag "Composite/Image" CacheView *image_view, *source_view; const char *value; MagickBooleanType clamp, status; MagickOffsetType progress; ssize_t y; /* Composite image. 
*/ status=MagickTrue; progress=0; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsStringTrue(value); status=MagickTrue; progress=0; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *pixels; PixelInfo canvas_pixel, source_pixel; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. */ pixels=(Quantum *) NULL; p=(Quantum *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset*(ssize_t) GetPixelChannels(source_image); } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&canvas_pixel); GetPixelInfo(source_image,&source_pixel); for (x=0; x < (ssize_t) image->columns; x++) { double gamma; MagickRealType alpha, Da, Dc, Dca, Sa, Sc, Sca; ssize_t i; size_t channels; if (clip_to_self != MagickFalse) { if (x < x_offset) { q+=GetPixelChannels(image); continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } if ((pixels == (Quantum *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { Quantum source[MaxPixelChannels]; /* Virtual composite: Sc: source color. Dc: canvas color. 
*/ (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source, exception); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if ((traits == UndefinedPixelTrait) || (source_traits == UndefinedPixelTrait)) continue; if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) q[i]; q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } q+=GetPixelChannels(image); continue; } /* Authentic composite: Sa: normalized source alpha. Da: normalized canvas alpha. */ Sa=QuantumScale*GetPixelAlpha(source_image,p); Da=QuantumScale*GetPixelAlpha(image,q); alpha=Sa+Da-Sa*Da; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image,channel); if (traits == UndefinedPixelTrait) continue; if ((source_traits == UndefinedPixelTrait) && (channel != AlphaPixelChannel)) continue; if (channel == AlphaPixelChannel) { /* Set alpha channel. */ pixel=QuantumRange*alpha; q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); continue; } /* Sc: source color. Dc: canvas color. */ Sc=(MagickRealType) GetPixelChannel(source_image,channel,p); Dc=(MagickRealType) q[i]; if ((traits & CopyPixelTrait) != 0) { /* Copy channel. */ q[i]=ClampToQuantum(Sc); continue; } /* Porter-Duff compositions: Sca: source normalized color multiplied by alpha. Dca: normalized canvas color multiplied by alpha. */ Sca=QuantumScale*Sa*Sc; Dca=QuantumScale*Da*Dc; gamma=PerceptibleReciprocal(alpha); pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa)); q[i]=clamp != MagickFalse ? 
ClampPixel(pixel) : ClampToQuantum(pixel); } p+=GetPixelChannels(source_image); channels=GetPixelChannels(source_image); if (p >= (pixels+channels*source_image->columns)) p=pixels; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); return(status); } MagickExport MagickBooleanType CompositeImage(Image *image, const Image *composite,const CompositeOperator compose, const MagickBooleanType clip_to_self,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { #define CompositeImageTag "Composite/Image" CacheView *source_view, *image_view; const char *value; GeometryInfo geometry_info; Image *canvas_image, *source_image; MagickBooleanType clamp, status; MagickOffsetType progress; MagickRealType amount, canvas_dissolve, midpoint, percent_luma, percent_chroma, source_dissolve, threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(composite != (Image *) NULL); assert(composite->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); source_image=CloneImage(composite,0,0,MagickTrue,exception); if (source_image == (const Image *) NULL) return(MagickFalse); (void) SetImageColorspace(source_image,image->colorspace,exception); if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp)) { status=CompositeOverImage(image,source_image,clip_to_self,x_offset, 
y_offset,exception); source_image=DestroyImage(source_image); return(status); } amount=0.5; canvas_image=(Image *) NULL; canvas_dissolve=1.0; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsStringTrue(value); SetGeometryInfo(&geometry_info); percent_luma=100.0; percent_chroma=100.0; source_dissolve=1.0; threshold=0.05f; switch (compose) { case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; if ((source_image->alpha_trait == UndefinedPixelTrait) && (image->alpha_trait != UndefinedPixelTrait)) (void) SetImageAlphaChannel(source_image,OpaqueAlphaChannel,exception); status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; const Quantum *p; Quantum *q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source_image->columns; x++) { ssize_t i; if (GetPixelReadMask(source_image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(source_image); i++) { PixelChannel channel = GetPixelChannelChannel(source_image,i); PixelTrait source_traits = GetPixelChannelTraits(source_image, channel); PixelTrait traits = GetPixelChannelTraits(image,channel); if 
((source_traits == UndefinedPixelTrait) || (traits == UndefinedPixelTrait)) continue; SetPixelChannel(image,channel,p[i],q); } p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case IntensityCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; const Quantum *p; Quantum *q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source_image->columns; x++) { if (GetPixelReadMask(source_image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } SetPixelAlpha(image,clamp != MagickFalse ? 
ClampPixel(GetPixelIntensity(source_image,p)) : ClampToQuantum(GetPixelIntensity(source_image,p)),q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case CopyAlphaCompositeOp: case ChangeMaskCompositeOp: { /* Modify canvas outside the overlaid region and require an alpha channel to exist, to add transparency. */ if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case BlurCompositeOp: { CacheView *canvas_view; double angle_range, angle_start, height, width; PixelInfo pixel; ResampleFilter *resample_filter; SegmentInfo blur; /* Blur Image by resampling dictated by an overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ canvas_image=CloneImage(image,0,0,MagickTrue,exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } /* Gather the maximum blur sigma values from user. */ flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (const char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & WidthValue) == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "InvalidSetting","'%s' '%s'","compose:args",value); source_image=DestroyImage(source_image); canvas_image=DestroyImage(canvas_image); return(MagickFalse); } /* Users input sigma now needs to be converted to the EWA ellipse size. 
The filter defaults to a sigma of 0.5 so to make this match the users input the ellipse size needs to be doubled. */ width=2.0*geometry_info.rho; height=width; if ((flags & HeightValue) != 0) height=2.0*geometry_info.sigma; /* Default the unrotated ellipse width and height axis vectors. */ blur.x1=width; blur.x2=0.0; blur.y1=0.0; blur.y2=height; if ((flags & XValue) != 0 ) { MagickRealType angle; /* Rotate vectors if a rotation angle is given. */ angle=DegreesToRadians(geometry_info.xi); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } angle_start=0.0; angle_range=0.0; if ((flags & YValue) != 0 ) { /* Lets set a angle range and calculate in the loop. */ angle_start=DegreesToRadians(geometry_info.xi); angle_range=DegreesToRadians(geometry_info.psi)-angle_start; } /* Set up a gaussian cylindrical filter for EWA Bluring. As the minimum ellipse radius of support*1.0 the EWA algorithm can only produce a minimum blur of 0.5 for Gaussian (support=2.0) This means that even 'No Blur' will be still a little blurry! The solution (as well as the problem of preventing any user expert filter settings, is to set our own user settings, restore them afterwards. */ resample_filter=AcquireResampleFilter(image,exception); SetResampleFilter(resample_filter,GaussianFilter); /* Perform the variable blurring of each pixel in image. 
*/ GetPixelInfo(image,&pixel); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } if (fabs(angle_range) > MagickEpsilon) { MagickRealType angle; angle=angle_start+angle_range*QuantumScale* GetPixelBlue(source_image,p); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } ScaleResampleFilter(resample_filter, blur.x1*QuantumScale*GetPixelRed(source_image,p), blur.y1*QuantumScale*GetPixelGreen(source_image,p), blur.x2*QuantumScale*GetPixelRed(source_image,p), blur.y2*QuantumScale*GetPixelGreen(source_image,p) ); (void) ResamplePixelColor(resample_filter,(double) x_offset+x, (double) y_offset+y,&pixel,exception); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } resample_filter=DestroyResampleFilter(resample_filter); source_view=DestroyCacheView(source_view); canvas_view=DestroyCacheView(canvas_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DisplaceCompositeOp: case DistortCompositeOp: { CacheView *canvas_view; MagickRealType horizontal_scale, vertical_scale; PixelInfo pixel; PointInfo center, offset; /* Displace/Distort 
based on overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,center.x,center.y]] */ canvas_image=CloneImage(image,0,0,MagickTrue,exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & (WidthValue | HeightValue)) == 0 ) { if ((flags & AspectValue) == 0) { horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0; vertical_scale=(MagickRealType) (source_image->rows-1)/2.0; } else { horizontal_scale=(MagickRealType) (image->columns-1)/2.0; vertical_scale=(MagickRealType) (image->rows-1)/2.0; } } else { horizontal_scale=geometry_info.rho; vertical_scale=geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale*=(source_image->columns-1)/200.0; vertical_scale*=(source_image->rows-1)/200.0; } else { horizontal_scale*=(image->columns-1)/200.0; vertical_scale*=(image->rows-1)/200.0; } } if ((flags & HeightValue) == 0) vertical_scale=horizontal_scale; } /* Determine fixed center point for absolute distortion map Absolute distort == Displace offset relative to a fixed absolute point Select that point according to +X+Y user inputs. default = center of overlay image arg flag '!' 
= locations/percentage relative to background image */ center.x=(MagickRealType) x_offset; center.y=(MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) != 0) center.x=(MagickRealType) ((image->columns-1)/2.0); else center.x=(MagickRealType) (x_offset+(source_image->columns-1)/ 2.0); else if ((flags & AspectValue) != 0) center.x=geometry_info.xi; else center.x=(MagickRealType) (x_offset+geometry_info.xi); if ((flags & YValue) == 0) if ((flags & AspectValue) != 0) center.y=(MagickRealType) ((image->rows-1)/2.0); else center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0); else if ((flags & AspectValue) != 0) center.y=geometry_info.psi; else center.y=(MagickRealType) (y_offset+geometry_info.psi); } /* Shift the pixel offset point as defined by the provided, displacement/distortion map. -- Like a lens... */ GetPixelInfo(image,&pixel); image_view=AcquireVirtualCacheView(image,exception); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } /* Displace the offset. */ offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ? 
x : 0); offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ? y : 0); status=InterpolatePixelInfo(image,image_view, UndefinedInterpolatePixel,(double) offset.x,(double) offset.y, &pixel,exception); if (status == MagickFalse) break; /* Mask with the 'invalid pixel mask' in alpha channel. */ pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)* (QuantumScale*GetPixelAlpha(source_image,p)); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } if (x < (ssize_t) source_image->columns) break; sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } canvas_view=DestroyCacheView(canvas_view); source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DissolveCompositeOp: { /* Geometry arguments to dissolve factors. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0; if ((source_dissolve-MagickEpsilon) < 0.0) source_dissolve=0.0; if ((source_dissolve+MagickEpsilon) > 1.0) { canvas_dissolve=2.0-source_dissolve; source_dissolve=1.0; } if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; if ((canvas_dissolve-MagickEpsilon) < 0.0) canvas_dissolve=0.0; } break; } case BlendCompositeOp: { value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0-source_dissolve; if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; } break; } case MathematicsCompositeOp: { /* Just collect the values from "compose:args", setting. 
Unused values are set to zero automagically. Arguments are normally a comma separated list, so this probably should be changed to some 'general comma list' parser, (with a minimum number of values) */ SetGeometryInfo(&geometry_info); value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) (void) ParseGeometry(value,&geometry_info); break; } case ModulateCompositeOp: { /* Determine the luma and chroma scale. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); percent_luma=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma=geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* Determine the amount and threshold. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); amount=geometry_info.rho; threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold=0.05f; } threshold*=QuantumRange; break; } default: break; } /* Composite image. */ status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *pixels; MagickRealType blue, chroma, green, hue, luma, red; PixelInfo canvas_pixel, source_pixel; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. 
*/ pixels=(Quantum *) NULL; p=(Quantum *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset*(ssize_t) GetPixelChannels(source_image); } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } hue=0.0; chroma=0.0; luma=0.0; GetPixelInfo(image,&canvas_pixel); GetPixelInfo(source_image,&source_pixel); for (x=0; x < (ssize_t) image->columns; x++) { double gamma; MagickRealType alpha, Da, Dc, Dca, DcaDa, Sa, SaSca, Sc, Sca; ssize_t i; size_t channels; if (clip_to_self != MagickFalse) { if (x < x_offset) { q+=GetPixelChannels(image); continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } if ((pixels == (Quantum *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { Quantum source[MaxPixelChannels]; /* Virtual composite: Sc: source color. Dc: canvas color. 
*/ (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source, exception); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if ((traits == UndefinedPixelTrait) || (source_traits == UndefinedPixelTrait)) continue; switch (compose) { case AlphaCompositeOp: case ChangeMaskCompositeOp: case CopyAlphaCompositeOp: case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case OutCompositeOp: case SrcInCompositeOp: case SrcOutCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) q[i]; break; } case ClearCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=0.0; break; } case BlendCompositeOp: case DissolveCompositeOp: { if (channel == AlphaPixelChannel) pixel=canvas_dissolve*GetPixelAlpha(source_image,source); else pixel=(MagickRealType) source[channel]; break; } default: { pixel=(MagickRealType) source[channel]; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } q+=GetPixelChannels(image); continue; } /* Authentic composite: Sa: normalized source alpha. Da: normalized canvas alpha. 
*/ Sa=QuantumScale*GetPixelAlpha(source_image,p); Da=QuantumScale*GetPixelAlpha(image,q); switch (compose) { case BumpmapCompositeOp: { alpha=GetPixelIntensity(source_image,p)*Sa; break; } case ColorBurnCompositeOp: case ColorDodgeCompositeOp: case DarkenCompositeOp: case DifferenceCompositeOp: case DivideDstCompositeOp: case DivideSrcCompositeOp: case ExclusionCompositeOp: case FreezeCompositeOp: case HardLightCompositeOp: case HardMixCompositeOp: case InterpolateCompositeOp: case LightenCompositeOp: case LinearBurnCompositeOp: case LinearDodgeCompositeOp: case LinearLightCompositeOp: case MathematicsCompositeOp: case MinusDstCompositeOp: case MinusSrcCompositeOp: case MultiplyCompositeOp: case NegateCompositeOp: case OverlayCompositeOp: case PegtopLightCompositeOp: case PinLightCompositeOp: case ReflectCompositeOp: case ScreenCompositeOp: case SoftBurnCompositeOp: case SoftDodgeCompositeOp: case SoftLightCompositeOp: case StampCompositeOp: case VividLightCompositeOp: { alpha=RoundToUnity(Sa+Da-Sa*Da); break; } case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case SrcInCompositeOp: { alpha=Sa*Da; break; } case DissolveCompositeOp: { alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+ canvas_dissolve*Da; break; } case DstOverCompositeOp: case OverCompositeOp: case SrcOverCompositeOp: { alpha=Sa+Da-Sa*Da; break; } case DstOutCompositeOp: { alpha=Da*(1.0-Sa); break; } case OutCompositeOp: case SrcOutCompositeOp: { alpha=Sa*(1.0-Da); break; } case BlendCompositeOp: case PlusCompositeOp: { alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da); break; } case XorCompositeOp: { alpha=Sa+Da-2.0*Sa*Da; break; } case ModulusAddCompositeOp: { if ((Sa+Da) <= 1.0) { alpha=(Sa+Da); break; } alpha=((Sa+Da)-1.0); break; } case ModulusSubtractCompositeOp: { if ((Sa-Da) >= 0.0) { alpha=(Sa-Da); break; } alpha=((Sa-Da)+1.0); break; } default: { alpha=1.0; break; } } switch (compose) { case ColorizeCompositeOp: case HueCompositeOp: case 
LuminizeCompositeOp: case ModulateCompositeOp: case RMSECompositeOp: case SaturateCompositeOp: { GetPixelInfoPixel(source_image,p,&source_pixel); GetPixelInfoPixel(image,q,&canvas_pixel); break; } default: break; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel, sans; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits = GetPixelChannelTraits(source_image,channel); if (traits == UndefinedPixelTrait) continue; if ((channel == AlphaPixelChannel) && ((traits & UpdatePixelTrait) != 0)) { /* Set alpha channel. */ switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case CopyBlackCompositeOp: case CopyBlueCompositeOp: case CopyCyanCompositeOp: case CopyGreenCompositeOp: case CopyMagentaCompositeOp: case CopyRedCompositeOp: case CopyYellowCompositeOp: case SrcAtopCompositeOp: case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Da; break; } case ChangeMaskCompositeOp: { MagickBooleanType equivalent; if (Da < 0.5) { pixel=(MagickRealType) TransparentAlpha; break; } equivalent=IsFuzzyEquivalencePixel(source_image,p,image,q); if (equivalent != MagickFalse) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) OpaqueAlpha; break; } case ClearCompositeOp: { pixel=(MagickRealType) TransparentAlpha; break; } case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case RMSECompositeOp: case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Da; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Sa; break; } if (Sa < Da) { pixel=QuantumRange*Da; break; } pixel=QuantumRange*Sa; break; } case CopyAlphaCompositeOp: { if (source_image->alpha_trait == UndefinedPixelTrait) pixel=GetPixelIntensity(source_image,p); else pixel=QuantumRange*Sa; break; } case BlurCompositeOp: case 
CopyCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: case DstAtopCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sa; break; } case DarkenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case DifferenceCompositeOp: { pixel=QuantumRange*fabs(Sa-Da); break; } case FreezeCompositeOp: { pixel=QuantumRange*(1.0-(1.0-Sa)*(1.0-Sa)* PerceptibleReciprocal(Da)); if (pixel < 0.0) pixel=0.0; break; } case InterpolateCompositeOp: { pixel=QuantumRange*(0.5-0.25*cos(MagickPI*Sa)-0.25* cos(MagickPI*Da)); break; } case LightenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case ModulateCompositeOp: { pixel=QuantumRange*Da; break; } case MultiplyCompositeOp: { pixel=QuantumRange*Sa*Da; break; } case NegateCompositeOp: { pixel=QuantumRange*((1.0-Sa-Da)); break; } case ReflectCompositeOp: { pixel=QuantumRange*(Sa*Sa*PerceptibleReciprocal(1.0-Da)); if (pixel > QuantumRange) pixel=QuantumRange; break; } case StampCompositeOp: { pixel=QuantumRange*(Sa+Da*Da-1.0); break; } case StereoCompositeOp: { pixel=QuantumRange*(Sa+Da)/2; break; } default: { pixel=QuantumRange*alpha; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); continue; } if (source_traits == UndefinedPixelTrait) continue; /* Sc: source color. Dc: canvas color. */ Sc=(MagickRealType) GetPixelChannel(source_image,channel,p); Dc=(MagickRealType) q[i]; if ((traits & CopyPixelTrait) != 0) { /* Copy channel. */ q[i]=ClampToQuantum(Dc); continue; } /* Porter-Duff compositions: Sca: source normalized color multiplied by alpha. Dca: normalized canvas color multiplied by alpha. 
*/ Sca=QuantumScale*Sa*Sc; Dca=QuantumScale*Da*Dc; SaSca=Sa*PerceptibleReciprocal(Sca); DcaDa=Dca*PerceptibleReciprocal(Da); switch (compose) { case DarkenCompositeOp: case LightenCompositeOp: case ModulusSubtractCompositeOp: { gamma=PerceptibleReciprocal(1.0-alpha); break; } default: { gamma=PerceptibleReciprocal(alpha); break; } } pixel=Dc; switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case SrcAtopCompositeOp: { pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa)); break; } case BlendCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc); break; } case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sca; break; } case BlurCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: { pixel=Sc; break; } case BumpmapCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc; break; } case ChangeMaskCompositeOp: { pixel=Dc; break; } case ClearCompositeOp: { pixel=0.0; break; } case ColorBurnCompositeOp: { if ((Sca == 0.0) && (Dca == Da)) { pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa)); break; } if (Sca == 0.0) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-DcaDa)* SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ColorDodgeCompositeOp: { if ((Sca*Da+Dca*Sa) >= Sa*Da) pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); else pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ColorizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &sans,&sans,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, 
&hue,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case CopyAlphaCompositeOp: { pixel=Dc; break; } case CopyBlackCompositeOp: { if (channel == BlackPixelChannel) pixel=(MagickRealType) GetPixelBlack(source_image,p); break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { if (channel == BluePixelChannel) pixel=(MagickRealType) GetPixelBlue(source_image,p); break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { if (channel == GreenPixelChannel) pixel=(MagickRealType) GetPixelGreen(source_image,p); break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { if (channel == RedPixelChannel) pixel=(MagickRealType) GetPixelRed(source_image,p); break; } case DarkenCompositeOp: { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ if ((Sca*Da) < (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case DarkenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? 
Sc : Dc; break; } case DifferenceCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa)); break; } case DissolveCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa* canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc); break; } case DivideDstCompositeOp: { if ((fabs((double) Sca) < MagickEpsilon) && (fabs((double) Dca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (fabs((double) Dca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case DivideSrcCompositeOp: { if ((fabs((double) Dca) < MagickEpsilon) && (fabs((double) Sca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } if (fabs((double) Sca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } pixel=QuantumRange*gamma*(Dca*Sa*SaSca+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } case DstAtopCompositeOp: { pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da)); break; } case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Dca; break; } case DstInCompositeOp: { pixel=QuantumRange*gamma*(Dca*Sa); break; } case DstOutCompositeOp: { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)); break; } case DstOverCompositeOp: { pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da)); break; } case ExclusionCompositeOp: { pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case FreezeCompositeOp: { pixel=QuantumRange*gamma*(1.0-(1.0-Sca)*(1.0-Sca)* PerceptibleReciprocal(Dca)); if (pixel < 0.0) pixel=0.0; break; } case HardLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0- Sa)); break; } pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case HardMixCompositeOp: { pixel=gamma*(((Sca+Dca) < 1.0) ? 
0.0 : QuantumRange); break; } case HueCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &hue,&sans,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case InCompositeOp: case SrcInCompositeOp: { pixel=QuantumRange*(Sca*Da); break; } case InterpolateCompositeOp: { pixel=QuantumRange*(0.5-0.25*cos(MagickPI*Sca)-0.25* cos(MagickPI*Dca)); break; } case LinearBurnCompositeOp: { /* LinearBurn: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Sc + Dc - 1 */ pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da); break; } case LinearDodgeCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc); break; } case LinearLightCompositeOp: { /* LinearLight: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Dc + 2*Sc - 1 */ pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca); break; } case LightenCompositeOp: { if ((Sca*Da) > (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case LightenIntensityCompositeOp: { /* Lighten is equivalent to a 'Maximum' method OR a greyscale version of a binary 'And' OR the 'Union' of pixel sets. */ pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? 
Sc : Dc; break; } case LuminizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&sans,&luma); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case MathematicsCompositeOp: { /* 'Mathematics' a free form user control mathematical composition is defined as... f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D Where the arguments A,B,C,D are (currently) passed to composite as a command separated 'geometry' string in "compose:args" image artifact. A = a->rho, B = a->sigma, C = a->xi, D = a->psi Applying the SVG transparency formula (see above), we get... Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa) Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) + Dca*(1.0-Sa) */ pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+ geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+ geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case MinusDstCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa); break; } case MinusSrcCompositeOp: { /* Minus source from canvas. 
f(Sc,Dc) = Sc - Dc */ pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da); break; } case ModulateCompositeOp: { ssize_t offset; if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint); if (offset == 0) { pixel=Dc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); luma+=(0.01*percent_luma*offset)/midpoint; chroma*=0.01*percent_chroma; HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ModulusAddCompositeOp: { if ((Sca+Dca) <= 1.0) { pixel=QuantumRange*(Sca+Dca); break; } pixel=QuantumRange*((Sca+Dca)-1.0); break; } case ModulusSubtractCompositeOp: { if ((Sca-Dca) >= 0.0) { pixel=QuantumRange*(Sca-Dca); break; } pixel=QuantumRange*((Sca-Dca)+1.0); break; } case MultiplyCompositeOp: { pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case NegateCompositeOp: { pixel=QuantumRange*(1.0-fabs(1.0-Sca-Dca)); break; } case OutCompositeOp: case SrcOutCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)); break; } case OverCompositeOp: case SrcOverCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa)); break; } case OverlayCompositeOp: { if ((2.0*Dca) < Da) { pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0- Da)); break; } pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+ Sca*(1.0-Da)); break; } case PegtopLightCompositeOp: { /* PegTop: A Soft-Light alternative: A continuous version of the Softlight function, producing very similar results. f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm. 
*/ if (fabs((double) Da) < MagickEpsilon) { pixel=QuantumRange*gamma*Sca; break; } pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0- Da)+Dca*(1.0-Sa)); break; } case PinLightCompositeOp: { /* PinLight: A Photoshop 7 composition method http://www.simplefilter.de/en/basics/mixmods.html f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc */ if ((Dca*Sa) < (Da*(2.0*Sca-Sa))) { pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa)); break; } if ((Dca*Sa) > (2.0*Sca*Da)) { pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca); break; } case PlusCompositeOp: { pixel=QuantumRange*(Sca+Dca); break; } case ReflectCompositeOp: { pixel=QuantumRange*gamma*(Sca*Sca*PerceptibleReciprocal(1.0-Dca)); if (pixel > QuantumRange) pixel=QuantumRange; break; } case RMSECompositeOp: { double gray; if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } gray=sqrt( (canvas_pixel.red-source_pixel.red)* (canvas_pixel.red-source_pixel.red)+ (canvas_pixel.green-source_pixel.green)* (canvas_pixel.green-source_pixel.green)+ (canvas_pixel.blue-source_pixel.blue)* (canvas_pixel.blue-source_pixel.blue)/3.0); switch (channel) { case RedPixelChannel: pixel=gray; break; case GreenPixelChannel: pixel=gray; break; case BluePixelChannel: pixel=gray; break; default: pixel=Dc; break; } break; } case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: 
pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ScreenCompositeOp: { /* Screen: a negated multiply: f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc) */ pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca); break; } case SoftBurnCompositeOp: { if ((Sca+Dca) < 1.0) pixel=QuantumRange*gamma*(0.5*Dca*PerceptibleReciprocal(1.0-Sca)); else pixel=QuantumRange*gamma*(1.0-0.5*(1.0-Sca)* PerceptibleReciprocal(Dca)); break; } case SoftDodgeCompositeOp: { if ((Sca+Dca) < 1.0) pixel=QuantumRange*gamma*(0.5*Sca*PerceptibleReciprocal(1.0-Dca)); else pixel=QuantumRange*gamma*(1.0-0.5*(1.0-Dca)* PerceptibleReciprocal(Sca)); break; } case SoftLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-DcaDa))+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da)) { pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*DcaDa* (4.0*DcaDa+1.0)*(DcaDa-1.0)+7.0*DcaDa)+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow(DcaDa,0.5)- DcaDa)+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case StampCompositeOp: { pixel=QuantumRange*(Sca+Dca*Dca-1.0); break; } case StereoCompositeOp: { if (channel == RedPixelChannel) pixel=(MagickRealType) GetPixelRed(source_image,p); break; } case ThresholdCompositeOp: { MagickRealType delta; delta=Sc-Dc; if ((MagickRealType) fabs((double) (2.0*delta)) < threshold) { pixel=gamma*Dc; break; } pixel=gamma*(Dc+delta*amount); break; } case VividLightCompositeOp: { /* VividLight: A Photoshop 7 composition method. See http://www.simplefilter.de/en/basics/mixmods.html. f(Sc,Dc) = (2*Sc < 1) ? 
1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc)) */ if ((fabs((double) Sa) < MagickEpsilon) || (fabs((double) (Sca-Sa)) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if ((2.0*Sca) <= Sa) { pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)* PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(2.0* (Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case XorCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } default: { pixel=Sc; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } p+=GetPixelChannels(source_image); channels=GetPixelChannels(source_image); if (p >= (pixels+channels*source_image->columns)) p=pixels; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); if (canvas_image != (Image * ) NULL) canvas_image=DestroyImage(canvas_image); else source_image=DestroyImage(source_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e x t u r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TextureImage() repeatedly tiles the texture image across and down the image % canvas. % % The format of the TextureImage method is: % % MagickBooleanType TextureImage(Image *image,const Image *texture, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o texture_image: This image is the texture to layer on the background. 
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
  ExceptionInfo *exception)
{
#define TextureImageTag "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Work on a private clone so the caller's texture is never modified; the
    clone is converted to the canvas colorspace before tiling.
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace,exception);
  /*
    Tile virtual-pixel method: reads past the texture edge wrap around, which
    the optimized path below relies on when copying whole rows.
  */
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
    exception);
  status=MagickTrue;
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) ||
       (image->alpha_trait != UndefinedPixelTrait) ||
       (texture_image->alpha_trait != UndefinedPixelTrait)))
    {
      /*
        General path: alpha or a non-trivial compose operator is involved, so
        tile by invoking CompositeImage() once per texture-sized cell.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          /* tile_offset shifts the tiling origin of the texture */
          thread_status=CompositeImage(image,texture_image,image->compose,
            MagickTrue,x+texture_image->tile_offset.x,y+
            texture_image->tile_offset.y,exception);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized): plain per-channel
    copy, one image row at a time, parallelized over rows.
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(texture_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    const Quantum
      *p,
      *pixels;

    ssize_t
      x;

    Quantum
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /*
      Fetch the texture row that maps onto image row y (modulo wraps the
      texture vertically); virtual pixels handle the horizontal wrap.
    */
    pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
      (y+texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
    {
      ssize_t
        j;

      p=pixels;
      width=texture_image->columns;
      /* clip the final (possibly partial) tile at the right edge */
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;
      for (j=0; j < (ssize_t) width; j++)
      {
        ssize_t
          i;

        for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(texture_image,i);
          PixelTrait traits = GetPixelChannelTraits(image,channel);
          PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
            channel);
          /* skip channels either image does not define */
          if ((traits == UndefinedPixelTrait) ||
              (texture_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(image,channel,p[i],q);
        }
        p+=GetPixelChannels(texture_image);
        q+=GetPixelChannels(image);
      }
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
mat_mul_blocks.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>

/* NOTE(review): this struct is unused in this file; kept in case another
   translation unit depends on its layout -- confirm before removing. */
struct matrix {
    int ncols;
    int nrows;
    double* mat;
};

#define BLOCKSIZE 64

void readMatrix(double** m, FILE* file, int rows, int cols);
void printMatrix(double** m, FILE* file, int rows, int cols);
void matrixMul(int BS, double** m1, double** m2, double** m3, int m, int n, int p);

/*
 * Reads a rows x cols matrix from 'file' (whitespace-separated doubles).
 * 'm' must point to an array of at least 'rows' row pointers; each row is
 * allocated here and owned by the caller (free with free(m[i])).
 * Exits with an error if the file is truncated/malformed or memory runs out.
 */
void readMatrix(double** m, FILE* file, int rows, int cols)
{
    int i, j;

    for (i = 0; i < rows; i++) {
        m[i] = (double*)malloc(cols * sizeof(double));
        if (m[i] == NULL) {
            fprintf(stderr, "Out of memory while reading a matrix.\n");
            exit(1);
        }
    }
    for (i = 0; i < rows; i++) {
        for (j = 0; j < cols; j++) {
            /* fscanf returns the number of items converted; anything other
               than 1 means the file ended early or contains junk */
            if (fscanf(file, "%lf", &m[i][j]) != 1) {
                fprintf(stderr, "Matrix file is truncated or malformed.\n");
                exit(1);
            }
        }
    }
}

/*
 * Writes a rows x cols matrix to 'file', one row per line,
 * elements separated by a single space (inverse of readMatrix).
 */
void printMatrix(double** m, FILE* file, int rows, int cols)
{
    int i, j;

    for (i = 0; i < rows; i++) {
        for (j = 0; j < cols; j++) {
            fprintf(file, "%lf ", m[i][j]);
        }
        fprintf(file, "\n");
    }
}

/*
 * Computes m3 += m1 * m2 where m1 is m x n, m2 is n x p and m3 is m x p
 * (m3 must be zero-initialised by the caller for a plain product).
 *
 * The iteration space is tiled into BS x BS blocks and each block update is
 * an OpenMP task. Dependences are expressed through the first element of
 * each tile: tasks that update the same m3 tile are serialised (inout),
 * while tasks on independent tiles may run concurrently. Call this from a
 * single thread inside a parallel region (e.g. under "#pragma omp single")
 * so the team can execute the tasks.
 *
 * Fixes vs. the earlier version: the reduction loop now runs over the
 * shared dimension n (it previously ran over p, reading out of bounds
 * whenever n != p), and partial edge tiles are clipped, so m, n and p no
 * longer need to be multiples of BS.
 */
void matrixMul(int BS, double** m1, double** m2, double** m3, int m, int n, int p)
{
    int i, j, k;

    for (i = 0; i < m; i += BS) {
        for (j = 0; j < p; j += BS) {
            for (k = 0; k < n; k += BS) {
#pragma omp task depend(in: m1[i][k], m2[k][j]) depend(inout: m3[i][j]) \
        firstprivate(i, j, k)
                {
                    int ii, jj, kk;
                    /* clip the tile at the matrix edges */
                    int iend = (i + BS < m) ? i + BS : m;
                    int jend = (j + BS < p) ? j + BS : p;
                    int kend = (k + BS < n) ? k + BS : n;

                    for (ii = i; ii < iend; ii++) {
                        for (kk = k; kk < kend; kk++) {
                            double a = m1[ii][kk];
                            for (jj = j; jj < jend; jj++) {
                                m3[ii][jj] += a * m2[kk][jj];
                            }
                        }
                    }
                }
            }
        }
    }
}

/*
 * Usage: <exe> mat1.txt mat2.txt
 * Each input file starts with "rows cols" followed by the elements.
 * Writes the product to result.txt and prints the wall-clock time spent in
 * the multiplication.
 */
int main(int argc, char* argv[])
{
    FILE *mat1, *mat2, *resultFile;
    double t;
    int m, n1, n2, p, i;

    if (argc != 3) { /* 1- exe name, 2- mat1.txt, 3- mat2.txt */
        printf("Parameter error.\n");
        exit(1);
    }

    mat1 = fopen(argv[1], "r");
    if (mat1 == NULL) {
        fprintf(stderr, "Cannot open %s.\n", argv[1]);
        exit(1);
    }
    mat2 = fopen(argv[2], "r");
    if (mat2 == NULL) {
        fprintf(stderr, "Cannot open %s.\n", argv[2]);
        fclose(mat1);
        exit(1);
    }

    if (fscanf(mat1, "%d %d", &m, &n1) != 2 ||
        fscanf(mat2, "%d %d", &n2, &p) != 2 ||
        m <= 0 || n1 <= 0 || n2 <= 0 || p <= 0) {
        fprintf(stderr, "Invalid matrix header.\n");
        fclose(mat1);
        fclose(mat2);
        exit(1);
    }

    /* m1 is m x n1 and m2 is n2 x p: the product exists only when n1 == n2 */
    if (n1 != n2) {
        printf("It is not possible to do matrix multiplication. Check matrix number of rows and cols.\n");
        fclose(mat1);
        fclose(mat2);
        exit(1);
    }

    double** m1 = (double**)malloc(m * sizeof(double*));
    double** m2 = (double**)malloc(n2 * sizeof(double*));
    double** m3 = (double**)malloc(m * sizeof(double*));
    if (m1 == NULL || m2 == NULL || m3 == NULL) {
        fprintf(stderr, "Out of memory.\n");
        exit(1);
    }

    readMatrix(m1, mat1, m, n1);
    readMatrix(m2, mat2, n2, p);

    t = omp_get_wtime();

    /* allocate and zero the result rows in parallel; failures are checked
       after the region (exit() inside a parallel region is not safe) */
#pragma omp parallel for private(i)
    for (i = 0; i < m; i++) {
        m3[i] = (double*)malloc(p * sizeof(double));
        if (m3[i] != NULL)
            memset(m3[i], 0, p * sizeof(double));
    }
    for (i = 0; i < m; i++) {
        if (m3[i] == NULL) {
            fprintf(stderr, "Out of memory.\n");
            exit(1);
        }
    }

    /* one thread creates the tasks, the whole team executes them; the
       implicit barrier at the end of single ensures all tasks finished */
#pragma omp parallel
#pragma omp single
    matrixMul(BLOCKSIZE, m1, m2, m3, m, n1, p);

    t = omp_get_wtime() - t; /* total wall-clock time spent in matrixMul */

    resultFile = fopen("result.txt", "w");
    if (resultFile == NULL) {
        fprintf(stderr, "Cannot open result.txt for writing.\n");
        exit(1);
    }
    printMatrix(m3, resultFile, m, p);
    printf("Elapsed time: %.5f seconds\n", t);

    fclose(mat1);
    fclose(mat2);
    fclose(resultFile);

    for (i = 0; i < m; i++) {
        free(m1[i]);
        free(m3[i]);
    }
    for (i = 0; i < n2; i++) {
        free(m2[i]);
    }
    free(m1);
    free(m2);
    free(m3);
    return 0;
}
MASK_merge_core.c
/* This work has been developed at Diamond Light Source Ltd.
 *
 * Copyright 2019 Daniil Kazantsev
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "MASK_merge_core.h"
#include "utils.h"

/* A method to ensure connectivity within regions of the segmented image/volume. Here we assume
 * that the MASK has been obtained using some classification/segmentation method such as k-means or Gaussian
 * mixture. Some pixels/voxels have been misclassified and we check the spatial dependencies
 * and correct the mask. We check the connectivity using the Bresenham line algorithm within the non-local window
 * surrounding the pixel of interest.
 * https://link.springer.com/content/pdf/10.1007/3-540-44438-6_8.pdf
 *
 * Input Parameters (from Python):
 * 1. MASK [0:255], the result of some classification algorithm (information-based preferably, Gaussian Mixtures work quite well)
 * 2. The list of classes that need to be processed. The order matters, e.g. (air, crystal)
 * 3. The list of improbable combinations of classes, such as: (class_start, class_middle, class_end, class_substitute)
 * 4. The size of the Correction Window (neighbourhood window)
 * 5. The number of iterations
 * Output:
 * 1. MASK_upd - the UPDATED MASK where some regions have been corrected (merged) or removed
 * 2.
CORRECTEDRegions - The array of the same size as MASK where all regions which were
 *                      changed are highlighted and the changes have been counted
 */

/* Integer absolute value. The original code used fabs() on int deltas in the
   Bresenham routines, silently round-tripping through double; this keeps the
   arithmetic in the integer domain and needs no <math.h>. */
static int int_abs(int v)
{
    return (v < 0) ? -v : v;
}

/* Main driver for mask merging/correction.
 * Detects the classes present in MASK, sorts them, and then iteratively:
 *   1) removes isolated outlier pixels/voxels,
 *   2) re-connects regions of the user-selected classes (SelClassesList) with
 *      the Bresenham line test inside a CorrectionWindow neighbourhood,
 *   3) replaces implausible class triplets (ComboClasses, 4 values per combo:
 *      start, mid, end, substitute).
 * MASK_upd receives the corrected mask; CORRECTEDRegions counts per-pixel changes.
 * Returns 0 on success, -1 on allocation failure. */
int Mask_merge_main(unsigned char *MASK, unsigned char *MASK_upd, unsigned char *CORRECTEDRegions, unsigned char *SelClassesList, unsigned char *ComboClasses, int tot_combinations, int SelClassesList_length, int classesNumb, int CorrectionWindow, int iterationsNumb, int dimX, int dimY, int dimZ)
{
    long i, j, k, l, n;
    int counterG, switcher;
    long DimTotal;
    unsigned char *MASK_temp, *ClassesList, CurrClass, temp;
    unsigned char class_start, class_mid, class_end, class_substitute;

    /* promote to long BEFORE multiplying so large volumes cannot overflow int */
    DimTotal = (long)dimX * (long)dimY * (long)dimZ;

    /* the list of all distinct classes (grey values) found in the mask */
    ClassesList = (unsigned char*) calloc(classesNumb, sizeof(unsigned char));
    if (ClassesList == NULL) return -1;

    /* scan the volume and collect up to classesNumb distinct values */
    CurrClass = MASK[0];
    ClassesList[0] = MASK[0];
    counterG = 1;
    for (i = 0; i < DimTotal; i++) {
        if (MASK[i] != CurrClass) {
            switcher = 1;
            for (j = 0; j < counterG; j++) {
                if (ClassesList[j] == MASK[i]) {
                    switcher = 0;
                    break;
                }
            }
            if (switcher == 1) {
                CurrClass = MASK[i];
                ClassesList[counterG] = MASK[i];
                counterG++;
            }
        }
        if (counterG == classesNumb) break;
    }

    /* sort classes LOW->HIGH (bubble sort; classesNumb is tiny) */
    for (i = 0; i < classesNumb; i++) {
        for (j = 0; j < classesNumb - 1; j++) {
            if (ClassesList[j] > ClassesList[j + 1]) {
                temp = ClassesList[j + 1];
                ClassesList[j + 1] = ClassesList[j];
                ClassesList[j] = temp;
            }
        }
    }

    MASK_temp = (unsigned char*) calloc(DimTotal, sizeof(unsigned char));
    if (MASK_temp == NULL) {
        free(ClassesList);
        return -1;
    }

    /* copy given MASK to MASK_upd */
    copyIm_unchar(MASK, MASK_upd, (long)(dimX), (long)(dimY), (long)(dimZ));

    if (dimZ == 1) {
    /********************** PERFORM 2D MASK PROCESSING ************************/
    for (k = 0; k < iterationsNumb; k++) {

    /* STEP1: in a smaller neighbourhood check that the current pixel is NOT an outlier */
#pragma omp parallel for shared(MASK,MASK_upd) private(i,j)
    for (i = 0; i < dimX; i++) {
        for (j = 0; j < dimY; j++) {
            OutiersRemoval2D(MASK, MASK_upd, i, j, (long)(dimX), (long)(dimY));
        }
    }
    /* copy the updated MASK (clean of outliers) */
    copyIm_unchar(MASK_upd, MASK_temp, (long)(dimX), (long)(dimY), (long)(dimZ));

    for (l = 0; l < SelClassesList_length; l++) {
#pragma omp parallel for shared(MASK_temp,MASK_upd,l) private(i,j)
        for (i = 0; i < dimX; i++) {
            for (j = 0; j < dimY; j++) {
                /* the central pixel must not itself be an outlier (class unchanged) */
                if (MASK_temp[j*dimX + i] == MASK[j*dimX + i]) {
                    /* !One needs to work with a specific class to avoid overlaps!
                       The order of classes in SelClassesList matters. */
                    if (MASK_temp[j*dimX + i] == ClassesList[SelClassesList[l]]) {
                        Mask_update_main2D(MASK_temp, MASK_upd, CORRECTEDRegions, i, j, CorrectionWindow, (long)(dimX), (long)(dimY));
                    }
                }
            }
        }
        /* copy the updated mask */
        copyIm_unchar(MASK_upd, MASK_temp, (long)(dimX), (long)(dimY), (long)(dimZ));
    }

    /* Main classes have been processed. Working with implausible combinations.
       Each combination is a quadruple (start, mid, end, substitute). */
    for (l = 0; l < tot_combinations; l++) {
        class_start = ComboClasses[l*4];      /* current class */
        class_mid = ComboClasses[l*4 + 1];    /* class in-between */
        class_end = ComboClasses[l*4 + 2];    /* neighbour class */
        class_substitute = ComboClasses[l*4 + 3]; /* class to replace class_mid with */
#pragma omp parallel for shared(MASK_temp,MASK_upd,l,class_start,class_mid,class_end,class_substitute) private(i,j)
        for (i = 0; i < dimX; i++) {
            for (j = 0; j < dimY; j++) {
                Mask_update_combo2D(MASK_temp, MASK_upd, CORRECTEDRegions, ClassesList, class_start, class_mid, class_end, class_substitute, i, j, CorrectionWindow, (long)(dimX), (long)(dimY));
            }
        }
        copyIm_unchar(MASK_upd, MASK_temp, (long)(dimX), (long)(dimY), (long)(dimZ));
    }
    } /* iterations (k) */
    }
    else {
    /********************** PERFORM 3D MASK PROCESSING ************************/
    for (l = 0; l < iterationsNumb; l++) {

    /* STEP1: in a smaller neighbourhood check that the current voxel is NOT an outlier */
#pragma omp parallel for shared(MASK,MASK_upd) private(i,j,k)
    for (i = 0; i < dimX; i++) {
        for (j = 0; j < dimY; j++) {
            for (k = 0; k < dimZ; k++) {
                OutiersRemoval3D(MASK, MASK_upd, i, j, k, (long)(dimX), (long)(dimY), (long)(dimZ));
            }
        }
    }
    /* copy the updated MASK (clean of outliers) */
    copyIm_unchar(MASK_upd, MASK_temp, (long)(dimX), (long)(dimY), (long)(dimZ));

    for (n = 0; n < SelClassesList_length; n++) {
#pragma omp parallel for shared(MASK_temp,MASK_upd,l,n) private(i,j,k)
        for (i = 0; i < dimX; i++) {
            for (j = 0; j < dimY; j++) {
                for (k = 0; k < dimZ; k++) {
                    /* the central voxel must not itself be an outlier (class unchanged) */
                    if (MASK_temp[(dimX*dimY)*k + j*dimX + i] == MASK[(dimX*dimY)*k + j*dimX + i]) {
                        /* !One needs to work with a specific class to avoid overlaps! */
                        if (MASK_temp[(dimX*dimY)*k + j*dimX + i] == ClassesList[SelClassesList[n]]) {
                            Mask_update_main3D(MASK_temp, MASK_upd, CORRECTEDRegions, i, j, k, CorrectionWindow, (long)(dimX), (long)(dimY), (long)(dimZ));
                        }
                    }
                }
            }
        }
        /* copy the updated mask */
        copyIm_unchar(MASK_upd, MASK_temp, (long)(dimX), (long)(dimY), (long)(dimZ));
    }

    /* Main classes have been processed. Working with implausible combinations. */
    for (n = 0; n < tot_combinations; n++) {
        class_start = ComboClasses[n*4];      /* current class */
        class_mid = ComboClasses[n*4 + 1];    /* class in-between */
        class_end = ComboClasses[n*4 + 2];    /* neighbour class */
        class_substitute = ComboClasses[n*4 + 3]; /* class to replace class_mid with */
#pragma omp parallel for shared(MASK_temp,MASK_upd,n,l,class_start,class_mid,class_end,class_substitute) private(i,j,k)
        for (i = 0; i < dimX; i++) {
            for (j = 0; j < dimY; j++) {
                for (k = 0; k < dimZ; k++) {
                    Mask_update_combo3D(MASK_temp, MASK_upd, CORRECTEDRegions, ClassesList, class_start, class_mid, class_end, class_substitute, i, j, k, CorrectionWindow, (long)(dimX), (long)(dimY), (long)(dimZ));
                }
            }
        }
        copyIm_unchar(MASK_upd, MASK_temp, (long)(dimX), (long)(dimY), (long)(dimZ));
    }
    } /* iterations (l) terminated */
    }
    free(MASK_temp);
    free(ClassesList);
    return 0;
}

/* Morphological mask processing: removes outliers, then re-connects regions of
 * class "primeClass" (first iteration, if primeClass != 0) or of classes 0 and 1
 * using the same window/Bresenham machinery as Mask_merge_main.
 * Returns 0 on success, -1 on allocation failure. */
int mask_morph_main(unsigned char *MASK, unsigned char *MASK_upd, unsigned char *CORRECTEDRegions, int primeClass, int CorrectionWindow, int iterationsNumb, int dimX, int dimY, int dimZ)
{
    long i, j, k, l, ll, class_select;
    long DimTotal;
    unsigned char *MASK_temp;

    /* promote to long BEFORE multiplying so large volumes cannot overflow int */
    DimTotal = (long)dimX * (long)dimY * (long)dimZ;
    MASK_temp = (unsigned char*) calloc(DimTotal, sizeof(unsigned char));
    if (MASK_temp == NULL) return -1;

    /* copy given MASK to MASK_upd */
    copyIm_unchar(MASK, MASK_upd, (long)(dimX), (long)(dimY), (long)(dimZ));

    if (dimZ == 1) {
    /********************** PERFORM 2D MASK PROCESSING ************************/
    for (k = 0; k < iterationsNumb; k++) {

    /* STEP1: in a smaller neighbourhood check that the current pixel is NOT an outlier */
#pragma omp parallel for shared(MASK,MASK_upd) private(i,j)
    for (j = 0; j < dimY; j++) {
        for (i = 0; i < dimX; i++) {
            OutiersRemoval2D(MASK, MASK_upd, i, j, (long)(dimX), (long)(dimY));
        }
    }
    /* copy the updated MASK (clean of outliers) to MASK_temp */
    copyIm_unchar(MASK_upd, MASK_temp, (long)(dimX), (long)(dimY), (long)(dimZ));

    for (ll = 0; ll < 2; ll++) {
#pragma omp parallel for shared(MASK_temp,MASK_upd,k,ll) private(i,j)
        for (j = 0; j < dimY; j++) {
            for (i = 0; i < dimX; i++) {
                /* the central pixel must not itself be an outlier (class unchanged) */
                if (MASK_temp[j*dimX + i] == MASK[j*dimX + i]) {
                    /* on the very first iteration prefer primeClass, otherwise
                       alternate over classes 0 and 1 */
                    if ((primeClass != 0) && (k == 0)) class_select = primeClass;
                    else class_select = ll;
                    /* One needs to work with a specific class to avoid possible overlaps */
                    if (MASK_temp[j*dimX + i] == class_select) {
                        Mask_update_main2D(MASK_temp, MASK_upd, CORRECTEDRegions, i, j, CorrectionWindow, (long)(dimX), (long)(dimY));
                    }
                }
            }
        }
        /* copy the updated mask */
        copyIm_unchar(MASK_upd, MASK_temp, (long)(dimX), (long)(dimY), (long)(dimZ));
    } /* end ll */
    }
    }
    else {
    /********************** PERFORM 3D MASK PROCESSING ************************/
    for (l = 0; l < iterationsNumb; l++) {

    /* STEP1: in a smaller neighbourhood check that the current voxel is NOT an outlier */
#pragma omp parallel for shared(MASK,MASK_upd) private(i,j,k)
    for (k = 0; k < dimZ; k++) {
        for (j = 0; j < dimY; j++) {
            for (i = 0; i < dimX; i++) {
                OutiersRemoval3D(MASK, MASK_upd, i, j, k, (long)(dimX), (long)(dimY), (long)(dimZ));
            }
        }
    }
    /* copy the updated MASK (clean of outliers) */
    copyIm_unchar(MASK_upd, MASK_temp, (long)(dimX), (long)(dimY), (long)(dimZ));

    for (ll = 0; ll < 2; ll++) {
#pragma omp parallel for shared(MASK_temp,MASK_upd,l,ll) private(i,j,k)
        for (k = 0; k < dimZ; k++) {
            for (j = 0; j < dimY; j++) {
                for (i = 0; i < dimX; i++) {
                    /* the central voxel must not itself be an outlier (class unchanged) */
                    if (MASK_temp[(dimX*dimY)*k + j*dimX + i] == MASK[(dimX*dimY)*k + j*dimX + i]) {
                        if ((primeClass != 0) && (l == 0)) class_select = primeClass;
                        else class_select = ll;
                        /* One needs to work with a specific class to avoid possible overlaps */
                        if (MASK_temp[(dimX*dimY)*k + j*dimX + i] == class_select) {
                            Mask_update_main3D(MASK_temp, MASK_upd, CORRECTEDRegions, i, j, k, CorrectionWindow, (long)(dimX), (long)(dimY), (long)(dimZ));
                        }
                    }
                }
            }
        }
        /* copy the updated mask */
        copyIm_unchar(MASK_upd, MASK_temp, (long)(dimX), (long)(dimY), (long)(dimZ));
    } /* end ll */
    } /* iterations terminated */
    }
    free(MASK_temp);
    return 0;
}

/********************************************************************/
/***************************2D Functions*****************************/
/********************************************************************/
/* If the pixel (i,j) disagrees with (almost) ALL of its 8 neighbours
   (counter >= 8 is only reachable for interior pixels, where all 8 neighbours
   exist), replace it with the class of the last-visited neighbour (i+1,j+1). */
void OutiersRemoval2D(unsigned char *MASK, unsigned char *MASK_upd, long i, long j, long dimX, long dimY)
{
    long i_m, j_m, i1, j1, counter;
    counter = 0;
    for (i_m = -1; i_m <= 1; i_m++) {
        for (j_m = -1; j_m <= 1; j_m++) {
            i1 = i + i_m;
            j1 = j + j_m;
            if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY))) {
                if (MASK[j*dimX + i] != MASK[j1*dimX + i1]) counter++;
            }
        }
    }
    /* after the loop i1 = i+1, j1 = j+1; counter >= 8 guarantees an interior
       pixel so that index is in bounds */
    if (counter >= 8) MASK_upd[j*dimX + i] = MASK[j1*dimX + i1];
    return;
}

/* STEP2+3+4 for a single pixel: if another class is present inside the
   CorrectionWindow, run the Bresenham test to every same-class neighbour,
   converting any different classes lying on the connecting line. */
void Mask_update_main2D(unsigned char *MASK_temp, unsigned char *MASK_upd, unsigned char *CORRECTEDRegions, long i, long j, int CorrectionWindow, long dimX, long dimY)
{
    long i_m, j_m, i1, j1, CounterOtherClass;

    /* STEP2: in a larger neighbourhood check that the other class is present */
    CounterOtherClass = 0;
    for (i_m = -CorrectionWindow; i_m <= CorrectionWindow; i_m++) {
        for (j_m = -CorrectionWindow; j_m <= CorrectionWindow; j_m++) {
            i1 = i + i_m;
            j1 = j + j_m;
            if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY))) {
                if (MASK_temp[j*dimX + i] != MASK_temp[j1*dimX + i1]) CounterOtherClass++;
            }
            if (CounterOtherClass > 0) break; /* one hit is enough */
        }
    }
    if (CounterOtherClass > 0) {
        /* STEP3: loop through all neighbours in CorrectionWindow and check the
           spatial connection: are there any classes between points A and B that
           do not belong to A and B? */
        for (i_m = -CorrectionWindow; i_m <= CorrectionWindow; i_m++) {
            for (j_m = -CorrectionWindow; j_m <= CorrectionWindow; j_m++) {
                i1 = i + i_m;
                j1 = j + j_m;
                if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY))) {
                    if (MASK_temp[j*dimX + i] == MASK_temp[j1*dimX + i1]) {
                        /* STEP4: A and B belong to the same class — run the
                           Bresenham line algorithm between them and convert all
                           points on the way to the class of A. */
                        bresenham2D_main(i, j, i1, j1, MASK_temp, MASK_upd, CORRECTEDRegions, (long)(dimX), (long)(dimY));
                    }
                }
            }
        }
    }
    return;
}

/* Bresenham rasterisation between (i,j) and (i1,j1); every pixel on the line
   whose class differs from the endpoints' class is converted to it. */
void bresenham2D_main(int i, int j, int i1, int j1, unsigned char *MASK, unsigned char *MASK_upd, unsigned char *CORRECTEDRegions, long dimX, long dimY)
{
    int n;
    int x[] = {i, i1};
    int y[] = {j, j1};
    int steep = (int_abs(y[1] - y[0]) > int_abs(x[1] - x[0]));
    int ystep = 0;

    if (steep == 1) {
        /* swap x/y of both endpoints so the line is shallow */
        int a, b;
        a = x[0]; b = y[0]; x[0] = b; y[0] = a;
        a = x[1]; b = y[1]; x[1] = b; y[1] = a;
    }
    if (x[0] > x[1]) {
        /* iterate left-to-right */
        int a, b;
        a = x[0]; b = x[1]; x[0] = b; x[1] = a;
        a = y[0]; b = y[1]; y[0] = b; y[1] = a;
    }
    int delx = x[1] - x[0];
    int dely = int_abs(y[1] - y[0]);
    int error = 0;
    int x_n = x[0];
    int y_n = y[0];
    if (y[0] < y[1]) { ystep = 1; }
    else { ystep = -1; }

    for (n = 0; n < delx + 1; n++) {
        if (steep == 1) {
            /* replace any class different from the class of the endpoints */
            if (MASK[j*dimX + i] != MASK[x_n*dimX + y_n]) {
                MASK_upd[x_n*dimX + y_n] = MASK[j*dimX + i];
                CORRECTEDRegions[x_n*dimX + y_n] += 1;
            }
        }
        else {
            if (MASK[j*dimX + i] != MASK[y_n*dimX + x_n]) {
                MASK_upd[y_n*dimX + x_n] = MASK[j*dimX + i];
                CORRECTEDRegions[y_n*dimX + x_n] += 1;
            }
        }
        x_n = x_n + 1;
        error = error + dely;
        if (2*error >= delx) {
            y_n = y_n + ystep;
            error = error - delx;
        }
    }
    return;
}

/* Combo variant of STEP2+3: pixel A must be "class_start" and pixel B
   "class_end"; if so, any "class_mid" on the connecting line is replaced
   with "class_substitute". */
void Mask_update_combo2D(unsigned char *MASK_temp, unsigned char *MASK_upd, unsigned char *CORRECTEDRegions, unsigned char *ClassesList, unsigned char class_start, unsigned char class_mid, unsigned char class_end, unsigned char class_substitute, long i, long j, int CorrectionWindow, long dimX, long dimY)
{
    long i_m, j_m, i1, j1, CounterOtherClass;

    /* STEP2: in a larger neighbourhood check that the other class is present */
    CounterOtherClass = 0;
    for (i_m = -CorrectionWindow; i_m <= CorrectionWindow; i_m++) {
        for (j_m = -CorrectionWindow; j_m <= CorrectionWindow; j_m++) {
            i1 = i + i_m;
            j1 = j + j_m;
            if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY))) {
                if (MASK_temp[j*dimX + i] != MASK_temp[j1*dimX + i1]) CounterOtherClass++;
            }
        }
    }
    if (CounterOtherClass > 0) {
        /* STEP3: check the spatial connections between A (class_start) and
           B (class_end) inside the window */
        for (i_m = -CorrectionWindow; i_m <= CorrectionWindow; i_m++) {
            for (j_m = -CorrectionWindow; j_m <= CorrectionWindow; j_m++) {
                i1 = i + i_m;
                j1 = j + j_m;
                if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY))) {
                    if ((MASK_temp[j*dimX + i] == ClassesList[class_start]) && (MASK_temp[j1*dimX + i1] == ClassesList[class_end])) {
                        /* A is "class_start", B is "class_end": if the undesirable
                           "class_mid" lies in between, replace it with
                           "class_substitute" */
                        bresenham2D_combo(i, j, i1, j1, MASK_temp, MASK_upd, CORRECTEDRegions, ClassesList, class_mid, class_substitute, (long)(dimX), (long)(dimY));
                    }
                }
            }
        }
    }
    return;
}

/* Bresenham rasterisation that only substitutes "class_mid" pixels found on
   the line with "class_substitute". */
void bresenham2D_combo(int i, int j, int i1, int j1, unsigned char *MASK, unsigned char *MASK_upd, unsigned char *CORRECTEDRegions, unsigned char *ClassesList, unsigned char class_mid, unsigned char class_substitute, long dimX, long dimY)
{
    int n;
    int x[] = {i, i1};
    int y[] = {j, j1};
    int steep = (int_abs(y[1] - y[0]) > int_abs(x[1] - x[0]));
    int ystep = 0;

    if (steep == 1) {
        /* swap x/y of both endpoints so the line is shallow */
        int a, b;
        a = x[0]; b = y[0]; x[0] = b; y[0] = a;
        a = x[1]; b = y[1]; x[1] = b; y[1] = a;
    }
    if (x[0] > x[1]) {
        /* iterate left-to-right */
        int a, b;
        a = x[0]; b = x[1]; x[0] = b; x[1] = a;
        a = y[0]; b = y[1]; y[0] = b; y[1] = a;
    }
    int delx = x[1] - x[0];
    int dely = int_abs(y[1] - y[0]);
    int error = 0;
    int x_n = x[0];
    int y_n = y[0];
    if (y[0] < y[1]) { ystep = 1; }
    else { ystep = -1; }

    for (n = 0; n < delx + 1; n++) {
        if (steep == 1) {
            /* the improbable class is replaced with a more probable one */
            if (MASK[x_n*dimX + y_n] == ClassesList[class_mid]) {
                MASK_upd[x_n*dimX + y_n] = ClassesList[class_substitute];
                CORRECTEDRegions[x_n*dimX + y_n] += 1;
            }
        }
        else {
            if (MASK[y_n*dimX + x_n] == ClassesList[class_mid]) {
                MASK_upd[y_n*dimX + x_n] = ClassesList[class_substitute];
                CORRECTEDRegions[y_n*dimX + x_n] += 1;
            }
        }
        x_n = x_n + 1;
        error = error + dely;
        if (2*error >= delx) {
            y_n = y_n + ystep;
            error = error - delx;
        }
    }
    return;
}

/********************************************************************/
/***************************3D Functions*****************************/
/********************************************************************/
/* If the voxel (i,j,k) disagrees with (almost) all of its 26 neighbours
   (counter >= 25 is only reachable for interior voxels), replace it with the
   class of the last-visited neighbour (i+1,j+1,k+1). */
void OutiersRemoval3D(unsigned char *MASK, unsigned char *MASK_upd, long i, long j, long k, long dimX, long dimY, long dimZ)
{
    long i_m, j_m, k_m, i1, j1, k1, counter;
    counter = 0;
    for (i_m = -1; i_m <= 1; i_m++) {
        for (j_m = -1; j_m <= 1; j_m++) {
            for (k_m = -1; k_m <= 1; k_m++) {
                i1 = i + i_m;
                j1 = j + j_m;
                k1 = k + k_m;
                if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY)) && ((k1 >= 0) && (k1 < dimZ))) {
                    if (MASK[(dimX*dimY)*k + j*dimX + i] != MASK[(dimX*dimY)*k1 + j1*dimX + i1]) counter++;
                }
            }
        }
    }
    /* after the loops i1/j1/k1 point at (i+1,j+1,k+1); counter >= 25 guarantees
       an interior voxel so that index is in bounds */
    if (counter >= 25) MASK_upd[(dimX*dimY)*k + j*dimX + i] = MASK[(dimX*dimY)*k1 + j1*dimX + i1];
    return;
}

/* 3D analogue of Mask_update_main2D: STEP2 presence test, then STEP3/4
   Bresenham line connection inside the cubic CorrectionWindow. */
void Mask_update_main3D(unsigned char *MASK_temp, unsigned char *MASK_upd, unsigned char *CORRECTEDRegions, long i, long j, long k, int CorrectionWindow, long dimX, long dimY, long dimZ)
{
    long i_m, j_m, k_m, i1, j1, k1, CounterOtherClass;

    /* STEP2: in a larger neighbourhood check first that the other class is present */
    CounterOtherClass = 0;
    for (i_m = -CorrectionWindow; i_m <= CorrectionWindow; i_m++) {
        for (j_m = -CorrectionWindow; j_m <= CorrectionWindow; j_m++) {
            for (k_m = -CorrectionWindow; k_m <= CorrectionWindow; k_m++) {
                i1 = i + i_m;
                j1 = j + j_m;
                k1 = k + k_m;
                if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY)) && ((k1 >= 0) && (k1 < dimZ))) {
                    if (MASK_temp[(dimX*dimY)*k + j*dimX + i] != MASK_temp[(dimX*dimY)*k1 + j1*dimX + i1]) CounterOtherClass++;
                }
                if (CounterOtherClass > 0) break; /* one hit is enough */
            }
        }
    }
    if (CounterOtherClass > 0) {
        /* STEP3: loop through all neighbours in CorrectionWindow and check the
           spatial connection between same-class voxels A and B */
        for (i_m = -CorrectionWindow; i_m <= CorrectionWindow; i_m++) {
            for (j_m = -CorrectionWindow; j_m <= CorrectionWindow; j_m++) {
                for (k_m = -CorrectionWindow; k_m <= CorrectionWindow; k_m++) {
                    i1 = i + i_m;
                    j1 = j + j_m;
                    k1 = k + k_m;
                    if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY)) && ((k1 >= 0) && (k1 < dimZ))) {
                        if (MASK_temp[(dimX*dimY)*k + j*dimX + i] == MASK_temp[(dimX*dimY)*k1 + j1*dimX + i1]) {
                            /* STEP4: run the 3D Bresenham line algorithm between A
                               and B and convert all points on the way to the class
                               of A */
                            bresenham3D_main(i, j, k, i1, j1, k1, MASK_temp, MASK_upd, CORRECTEDRegions, (long)(dimX), (long)(dimY), (long)(dimZ));
                        }
                    }
                }
            }
        }
    }
    return;
}

/* 3D Bresenham between (i,j,k) and (i1,j1,k1); voxels on the line whose class
   differs from the endpoints' class are converted to it. The three branches
   drive the walk along the dominant axis (x, y or z). */
void bresenham3D_main(int i, int j, int k, int i1, int j1, int k1, unsigned char *MASK, unsigned char *MASK_upd, unsigned char *CORRECTEDRegions, long dimX, long dimY, long dimZ)
{
    int P1[] = {i, j, k};
    int P2[] = {i1, j1, k1};

    int x1 = P1[0]; int y1 = P1[1]; int z1 = P1[2];
    int x2 = P2[0]; int y2 = P2[1]; int z2 = P2[2];
    int dx = x2 - x1; int dy = y2 - y1; int dz = z2 - z1;
    int ax = int_abs(dx)*2; int ay = int_abs(dy)*2; int az = int_abs(dz)*2;
    int sx = signum(dx); int sy = signum(dy); int sz = signum(dz);
    int x = x1; int y = y1; int z = z1;
    int xd; int yd; int zd;

    if (ax >= max(ay, az)) {
        /* x is the dominant axis */
        yd = ay - ax/2;
        zd = az - ax/2;
        while (1) {
            if (MASK[(dimX*dimY)*k + j*dimX + i] != MASK[(dimX*dimY)*z + y*dimX + x]) {
                MASK_upd[(dimX*dimY)*z + y*dimX + x] = MASK[(dimX*dimY)*k + j*dimX + i];
                CORRECTEDRegions[(dimX*dimY)*z + y*dimX + x] += 1;
            }
            if (x == x2) break;
            if (yd >= 0) { y = y + sy; yd = yd - ax; } /* move along y */
            if (zd >= 0) { z = z + sz; zd = zd - ax; } /* move along z */
            x = x + sx; /* move along x */
            yd = yd + ay;
            zd = zd + az;
        }
    }
    else if (ay >= max(ax, az)) {
        /* y is the dominant axis */
        xd = ax - ay/2;
        zd = az - ay/2;
        while (1) {
            if (MASK[(dimX*dimY)*k + j*dimX + i] != MASK[(dimX*dimY)*z + y*dimX + x]) {
                MASK_upd[(dimX*dimY)*z + y*dimX + x] = MASK[(dimX*dimY)*k + j*dimX + i];
                CORRECTEDRegions[(dimX*dimY)*z + y*dimX + x] += 1;
            }
            if (y == y2) break;
            if (xd >= 0) { x = x + sx; xd = xd - ay; } /* move along x */
            if (zd >= 0) { z = z + sz; zd = zd - ay; } /* move along z */
            y = y + sy; /* move along y */
            xd = xd + ax;
            zd = zd + az;
        }
    }
    else if (az >= max(ax, ay)) {
        /* z is the dominant axis */
        xd = ax - az/2;
        yd = ay - az/2;
        while (1) {
            if (MASK[(dimX*dimY)*k + j*dimX + i] != MASK[(dimX*dimY)*z + y*dimX + x]) {
                MASK_upd[(dimX*dimY)*z + y*dimX + x] = MASK[(dimX*dimY)*k + j*dimX + i];
                CORRECTEDRegions[(dimX*dimY)*z + y*dimX + x] += 1;
            }
            if (z == z2) break;
            if (xd >= 0) { x = x + sx; xd = xd - az; } /* move along x */
            if (yd >= 0) { y = y + sy; yd = yd - az; } /* move along y */
            z = z + sz; /* move along z */
            xd = xd + ax;
            yd = yd + ay;
        }
    }
    return;
}

/* 3D combo variant: voxel A must be "class_start" and voxel B "class_end";
   if so, any "class_mid" on the connecting line is replaced with
   "class_substitute". */
void Mask_update_combo3D(unsigned char *MASK_temp, unsigned char *MASK_upd, unsigned char *CORRECTEDRegions, unsigned char *ClassesList, unsigned char class_start, unsigned char class_mid, unsigned char class_end, unsigned char class_substitute, long i, long j, long k, int CorrectionWindow, long dimX, long dimY, long dimZ)
{
    long i_m, j_m, k_m, i1, j1, k1, CounterOtherClass;

    /* STEP2: in a larger neighbourhood check that the other class is present */
    CounterOtherClass = 0;
    for (i_m = -CorrectionWindow; i_m <= CorrectionWindow; i_m++) {
        for (j_m = -CorrectionWindow; j_m <= CorrectionWindow; j_m++) {
            for (k_m = -CorrectionWindow; k_m <= CorrectionWindow; k_m++) {
                i1 = i + i_m;
                j1 = j + j_m;
                k1 = k + k_m;
                if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY)) && ((k1 >= 0) && (k1 < dimZ))) {
                    if (MASK_temp[(dimX*dimY)*k + j*dimX + i] != MASK_temp[(dimX*dimY)*k1 + j1*dimX + i1]) CounterOtherClass++;
                }
                if (CounterOtherClass > 0) break; /* one hit is enough */
            }
        }
    }
    if (CounterOtherClass > 0) {
        /* STEP3: check the spatial connections between A (class_start) and
           B (class_end) inside the window */
        for (i_m = -CorrectionWindow; i_m <= CorrectionWindow; i_m++) {
            for (j_m = -CorrectionWindow; j_m <= CorrectionWindow; j_m++) {
                for (k_m = -CorrectionWindow; k_m <= CorrectionWindow; k_m++) {
                    i1 = i + i_m;
                    j1 = j + j_m;
                    k1 = k + k_m;
                    if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY)) && ((k1 >= 0) && (k1 < dimZ))) {
                        if ((MASK_temp[(dimX*dimY)*k + j*dimX + i] == ClassesList[class_start]) && (MASK_temp[(dimX*dimY)*k1 + j1*dimX + i1] == ClassesList[class_end])) {
                            /* if the undesirable "class_mid" lies in between,
                               replace it with "class_substitute" */
                            bresenham3D_combo(i, j, k, i1, j1, k1, MASK_temp, MASK_upd, CORRECTEDRegions, ClassesList, class_mid, class_substitute, (long)(dimX), (long)(dimY), (long)(dimZ));
                        }
                    }
                }
            }
        }
    }
    return;
}

/* 3D Bresenham that only substitutes "class_mid" voxels found on the line
   with "class_substitute". Same dominant-axis structure as bresenham3D_main. */
void bresenham3D_combo(int i, int j, int k, int i1, int j1, int k1, unsigned char *MASK, unsigned char *MASK_upd, unsigned char *CORRECTEDRegions, unsigned char *ClassesList, unsigned char class_mid, unsigned char class_substitute, long dimX, long dimY, long dimZ)
{
    int P1[] = {i, j, k};
    int P2[] = {i1, j1, k1};

    int x1 = P1[0]; int y1 = P1[1]; int z1 = P1[2];
    int x2 = P2[0]; int y2 = P2[1]; int z2 = P2[2];
    int dx = x2 - x1; int dy = y2 - y1; int dz = z2 - z1;
    int ax = int_abs(dx)*2; int ay = int_abs(dy)*2; int az = int_abs(dz)*2;
    int sx = signum(dx); int sy = signum(dy); int sz = signum(dz);
    int x = x1; int y = y1; int z = z1;
    int xd; int yd; int zd;

    if (ax >= max(ay, az)) {
        /* x is the dominant axis */
        yd = ay - ax/2;
        zd = az - ax/2;
        while (1) {
            if (MASK[(dimX*dimY)*z + y*dimX + x] == ClassesList[class_mid]) {
                MASK_upd[(dimX*dimY)*z + y*dimX + x] = ClassesList[class_substitute];
                CORRECTEDRegions[(dimX*dimY)*z + y*dimX + x] += 1;
            }
            if (x == x2) break;
            if (yd >= 0) { y = y + sy; yd = yd - ax; } /* move along y */
            if (zd >= 0) { z = z + sz; zd = zd - ax; } /* move along z */
            x = x + sx; /* move along x */
            yd = yd + ay;
            zd = zd + az;
        }
    }
    else if (ay >= max(ax, az)) {
        /* y is the dominant axis */
        xd = ax - ay/2;
        zd = az - ay/2;
        while (1) {
            if (MASK[(dimX*dimY)*z + y*dimX + x] == ClassesList[class_mid]) {
                MASK_upd[(dimX*dimY)*z + y*dimX + x] = ClassesList[class_substitute];
                CORRECTEDRegions[(dimX*dimY)*z + y*dimX + x] += 1;
            }
            if (y == y2) break;
            if (xd >= 0) { x = x + sx; xd = xd - ay; } /* move along x */
            if (zd >= 0) { z = z + sz; zd = zd - ay; } /* move along z */
            y = y + sy; /* move along y */
            xd = xd + ax;
            zd = zd + az;
        }
    }
    else if (az >= max(ax, ay)) {
        /* z is the dominant axis */
        xd = ax - az/2;
        yd = ay - az/2;
        while (1) {
            if (MASK[(dimX*dimY)*z + y*dimX + x] == ClassesList[class_mid]) {
                MASK_upd[(dimX*dimY)*z + y*dimX + x] = ClassesList[class_substitute];
                CORRECTEDRegions[(dimX*dimY)*z + y*dimX + x] += 1;
            }
            if (z == z2) break;
            if (xd >= 0) { x = x + sx; xd = xd - az; } /* move along x */
            if (yd >= 0) { y = y + sy; yd = yd - az; } /* move along y */
            z = z + sz; /* move along z */
            xd = xd + ax;
            yd = yd + ay;
        }
    }
    return;
}
ipa-fnsummary.c
/* Function summary pass. Copyright (C) 2003-2020 Free Software Foundation, Inc. Contributed by Jan Hubicka This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ /* Analysis of function bodies used by inter-procedural passes We estimate for each function - function body size and size after specializing into given context - average function execution time in a given context - function frame size For each call - call statement size, time and how often the parameters change ipa_fn_summary data structures store above information locally (i.e. parameters of the function itself) and globally (i.e. parameters of the function created by applying all the inline decisions already present in the callgraph). We provide access to the ipa_fn_summary data structure and basic logic updating the parameters when inlining is performed. The summaries are context sensitive. Context means 1) partial assignment of known constant values of operands 2) whether function is inlined into the call or not. It is easy to add more variants. To represent function size and time that depends on context (i.e. it is known to be optimized away when context is known either by inlining or from IP-CP and cloning), we use predicates. estimate_edge_size_and_time can be used to query function size/time in the given context. ipa_merge_fn_summary_after_inlining merges properties of caller and callee after inlining. Finally pass_inline_parameters is exported. 
This is used to drive computation of function parameters used by the early inliner. IPA inlined performs analysis via its analyze_function method. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "backend.h" #include "tree.h" #include "gimple.h" #include "alloc-pool.h" #include "tree-pass.h" #include "ssa.h" #include "tree-streamer.h" #include "cgraph.h" #include "diagnostic.h" #include "fold-const.h" #include "print-tree.h" #include "tree-inline.h" #include "gimple-pretty-print.h" #include "cfganal.h" #include "gimple-iterator.h" #include "tree-cfg.h" #include "tree-ssa-loop-niter.h" #include "tree-ssa-loop.h" #include "symbol-summary.h" #include "ipa-prop.h" #include "ipa-fnsummary.h" #include "cfgloop.h" #include "tree-scalar-evolution.h" #include "ipa-utils.h" #include "cfgexpand.h" #include "gimplify.h" #include "stringpool.h" #include "attribs.h" #include "tree-into-ssa.h" /* Summaries. */ fast_function_summary <ipa_fn_summary *, va_gc> *ipa_fn_summaries; fast_function_summary <ipa_size_summary *, va_heap> *ipa_size_summaries; fast_call_summary <ipa_call_summary *, va_heap> *ipa_call_summaries; /* Edge predicates goes here. */ static object_allocator<predicate> edge_predicate_pool ("edge predicates"); /* Dump IPA hints. 
*/

void
ipa_dump_hints (FILE *f, ipa_hints hints)
{
  if (!hints)
    return;
  fprintf (f, "IPA hints:");
  /* Each recognized hint bit is cleared as it is printed; the final assert
     then proves no unknown bits were passed in.  */
  if (hints & INLINE_HINT_indirect_call)
    {
      hints &= ~INLINE_HINT_indirect_call;
      fprintf (f, " indirect_call");
    }
  if (hints & INLINE_HINT_loop_iterations)
    {
      hints &= ~INLINE_HINT_loop_iterations;
      fprintf (f, " loop_iterations");
    }
  if (hints & INLINE_HINT_loop_stride)
    {
      hints &= ~INLINE_HINT_loop_stride;
      fprintf (f, " loop_stride");
    }
  if (hints & INLINE_HINT_same_scc)
    {
      hints &= ~INLINE_HINT_same_scc;
      fprintf (f, " same_scc");
    }
  if (hints & INLINE_HINT_in_scc)
    {
      hints &= ~INLINE_HINT_in_scc;
      fprintf (f, " in_scc");
    }
  if (hints & INLINE_HINT_cross_module)
    {
      hints &= ~INLINE_HINT_cross_module;
      fprintf (f, " cross_module");
    }
  if (hints & INLINE_HINT_declared_inline)
    {
      hints &= ~INLINE_HINT_declared_inline;
      fprintf (f, " declared_inline");
    }
  if (hints & INLINE_HINT_known_hot)
    {
      hints &= ~INLINE_HINT_known_hot;
      fprintf (f, " known_hot");
    }
  gcc_assert (!hints);
}

/* Record SIZE and TIME to SUMMARY.
   The accounted code will be executed when EXEC_PRED is true.
   When NONCONST_PRED is false the code will evaluate to constant and
   will get optimized out in specialized clones of the function.
   If CALL is true account to call_size_time_table rather than
   size_time_table.   */

void
ipa_fn_summary::account_size_time (int size, sreal time,
				   const predicate &exec_pred,
				   const predicate &nonconst_pred_in,
				   bool call)
{
  size_time_entry *e;
  bool found = false;
  int i;
  predicate nonconst_pred;
  vec<size_time_entry, va_gc> *table = call ? call_size_time_table : size_time_table;

  if (exec_pred == false)
    return;

  /* Code that never executes under a nonconst predicate contributes
     nothing either.  */
  nonconst_pred = nonconst_pred_in & exec_pred;

  if (nonconst_pred == false)
    return;

  /* We need to create initial empty unconditional clause, but otherwise
     we don't need to account empty times and sizes.  */
  if (!size && time == 0 && table)
    return;

  /* Only for calls we are unaccounting what we previously recorded.  */
  gcc_checking_assert (time >= 0 || call);

  /* Look for an existing entry with the same predicate pair so size/time
     can be accumulated rather than duplicated.  */
  for (i = 0; vec_safe_iterate (table, i, &e); i++)
    if (e->exec_predicate == exec_pred
	&& e->nonconst_predicate == nonconst_pred)
      {
	found = true;
	break;
      }
  if (i == max_size_time_table_size)
    {
      /* Table is full: fold everything further into entry 0 (the
	 unconditional one) instead of growing without bound.  */
      i = 0;
      found = true;
      e = &(*table)[0];
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "\t\tReached limit on number of entries, "
		 "ignoring the predicate.");
    }
  if (dump_file && (dump_flags & TDF_DETAILS) && (time != 0 || size))
    {
      fprintf (dump_file,
	       "\t\tAccounting size:%3.2f, time:%3.2f on %spredicate exec:",
	       ((double) size) / ipa_fn_summary::size_scale,
	       (time.to_double ()), found ? "" : "new ");
      exec_pred.dump (dump_file, conds, 0);
      if (exec_pred != nonconst_pred)
	{
	  fprintf (dump_file, " nonconst:");
	  nonconst_pred.dump (dump_file, conds);
	}
      else
	fprintf (dump_file, "\n");
    }
  if (!found)
    {
      class size_time_entry new_entry;
      new_entry.size = size;
      new_entry.time = time;
      new_entry.exec_predicate = exec_pred;
      new_entry.nonconst_predicate = nonconst_pred;
      if (call)
	vec_safe_push (call_size_time_table, new_entry);
      else
	vec_safe_push (size_time_table, new_entry);
    }
  else
    {
      e->size += size;
      e->time += time;
      /* FIXME: PR bootstrap/92653
	 gcc_checking_assert (e->time >= -1); */
      /* Tolerate small roundoff issues.  */
      if (e->time < 0)
	e->time = 0;
    }
}

/* We proved E to be unreachable, redirect it to __builtin_unreachable.  */

static struct cgraph_edge *
redirect_to_unreachable (struct cgraph_edge *e)
{
  /* Remember the inlined callee (if any) so its clones can be removed
     after the redirection below.  */
  struct cgraph_node *callee = !e->inline_failed ? e->callee : NULL;
  struct cgraph_node *target
    = cgraph_node::get_create (builtin_decl_implicit (BUILT_IN_UNREACHABLE));

  if (e->speculative)
    e = cgraph_edge::resolve_speculation (e, target->decl);
  else if (!e->callee)
    e = cgraph_edge::make_direct (e, target);
  else
    e->redirect_callee (target);
  class ipa_call_summary *es = ipa_call_summaries->get (e);
  e->inline_failed = CIF_UNREACHABLE;
  e->count = profile_count::zero ();
  es->call_stmt_size = 0;
  es->call_stmt_time = 0;
  if (callee)
    callee->remove_symbol_and_inline_clones ();
  return e;
}

/* Set predicate for edge E.  */

static void
edge_set_predicate (struct cgraph_edge *e, predicate *predicate)
{
  /* If the edge is determined to be never executed, redirect it
     to BUILTIN_UNREACHABLE to make it clear to IPA passes the call will
     be optimized out.  */
  if (predicate && *predicate == false
      /* When handling speculative edges, we need to do the redirection
	 just once.  Do it always on the direct edge, so we do not
	 attempt to resolve speculation while duplicating the edge.  */
      && (!e->speculative || e->callee))
    e = redirect_to_unreachable (e);

  class ipa_call_summary *es = ipa_call_summaries->get (e);
  if (predicate && *predicate != true)
    {
      if (!es->predicate)
	es->predicate = edge_predicate_pool.allocate ();
      *es->predicate = *predicate;
    }
  else
    {
      /* Trivial (always true/false) predicates are represented by a NULL
	 pointer; release the pooled object.  */
      if (es->predicate)
	edge_predicate_pool.remove (es->predicate);
      es->predicate = NULL;
    }
}

/* Set predicate for hint *P.  */

static void
set_hint_predicate (predicate **p, predicate new_predicate)
{
  if (new_predicate == false || new_predicate == true)
    {
      /* Trivial predicates are represented by a NULL pointer.  */
      if (*p)
	edge_predicate_pool.remove (*p);
      *p = NULL;
    }
  else
    {
      if (!*p)
	*p = edge_predicate_pool.allocate ();
      **p = new_predicate;
    }
}

/* Compute what conditions may or may not hold given information about
   parameters.  RET_CLAUSE returns truths that may hold in a specialized copy,
   while RET_NONSPEC_CLAUSE returns truths that may hold in a nonspecialized
   copy when called in a given context.  It is a bitmask of conditions.
   Bit 0 means that condition is known to be false, while bit 1 means that
   condition may or may not be true.  These differ - for example NOT_INLINED
   condition is always false in the second and also builtin_constant_p tests
   cannot use the fact that parameter is indeed a constant.

   KNOWN_VALS is partial mapping of parameters of NODE to constant values.
   KNOWN_AGGS is a vector of aggregate known offset/value set for each
   parameter.  Return clause of possible truths.  When INLINE_P is true, assume
   that we are inlining.

   ERROR_MARK means compile time invariant.  */

static void
evaluate_conditions_for_known_args (struct cgraph_node *node,
				    bool inline_p,
				    vec<tree> known_vals,
				    vec<value_range> known_value_ranges,
				    vec<ipa_agg_value_set> known_aggs,
				    clause_t *ret_clause,
				    clause_t *ret_nonspec_clause)
{
  clause_t clause = inline_p ? 0 : 1 << predicate::not_inlined_condition;
  clause_t nonspec_clause = 1 << predicate::not_inlined_condition;
  class ipa_fn_summary *info = ipa_fn_summaries->get (node);
  int i;
  struct condition *c;

  for (i = 0; vec_safe_iterate (info->conds, i, &c); i++)
    {
      tree val = NULL;
      tree res;
      int j;
      struct expr_eval_op *op;

      /* We allow call stmt to have fewer arguments than the callee function
	 (especially for K&R style programs).  So bound check here (we assume
	 known_aggs vector, if non-NULL, has the same length as
	 known_vals).  */
      gcc_checking_assert (!known_aggs.length () || !known_vals.length ()
			   || (known_vals.length () == known_aggs.length ()));

      if (c->agg_contents)
	{
	  struct ipa_agg_value_set *agg;

	  if (c->code == predicate::changed
	      && !c->by_ref
	      && c->operand_num < (int)known_vals.length ()
	      && (known_vals[c->operand_num] == error_mark_node))
	    continue;

	  if (c->operand_num < (int)known_aggs.length ())
	    {
	      agg = &known_aggs[c->operand_num];
	      val = ipa_find_agg_cst_for_param (agg,
						c->operand_num
						   < (int) known_vals.length ()
						? known_vals[c->operand_num]
						: NULL,
						c->offset, c->by_ref);
	    }
	  else
	    val = NULL_TREE;
	}
      else if (c->operand_num < (int) known_vals.length ())
	{
	  val = known_vals[c->operand_num];
	  if (val == error_mark_node && c->code != predicate::changed)
	    val = NULL_TREE;
	}

      /* Without a known value, CHANGED/IS_NOT_CONSTANT conditions hold in
	 both the specialized and non-specialized copy.  */
      if (!val
	  && (c->code == predicate::changed
	      || c->code == predicate::is_not_constant))
	{
	  clause |= 1 << (i + predicate::first_dynamic_condition);
	  nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
	  continue;
	}
      if (c->code == predicate::changed)
	{
	  nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
	  continue;
	}

      if (c->code == predicate::is_not_constant)
	{
	  nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
	  continue;
	}

      if (val && TYPE_SIZE (c->type) == TYPE_SIZE (TREE_TYPE (val)))
	{
	  if (c->type != TREE_TYPE (val))
	    val = fold_unary (VIEW_CONVERT_EXPR, c->type, val);

	  /* Replay the recorded operand expression on the known value.  */
	  for (j = 0; vec_safe_iterate (c->param_ops, j, &op); j++)
	    {
	      if (!val)
		break;
	      if (!op->val[0])
		val = fold_unary (op->code, op->type, val);
	      else if (!op->val[1])
		val = fold_binary (op->code, op->type,
				   op->index ? op->val[0] : val,
				   op->index ? val : op->val[0]);
	      else if (op->index == 0)
		val = fold_ternary (op->code, op->type,
				    val, op->val[0], op->val[1]);
	      else if (op->index == 1)
		val = fold_ternary (op->code, op->type,
				    op->val[0], val, op->val[1]);
	      else if (op->index == 2)
		val = fold_ternary (op->code, op->type,
				    op->val[0], op->val[1], val);
	      else
		val = NULL_TREE;
	    }

	  res = val
	    ? fold_binary_to_constant (c->code, boolean_type_node, val, c->val)
	    : NULL;

	  if (res && integer_zerop (res))
	    continue;
	  if (res && integer_onep (res))
	    {
	      clause |= 1 << (i + predicate::first_dynamic_condition);
	      nonspec_clause
		|= 1 << (i + predicate::first_dynamic_condition);
	      continue;
	    }
	}
      /* Constant folding was inconclusive; try to decide the condition
	 from the known value range of the parameter.  */
      if (c->operand_num < (int) known_value_ranges.length ()
	  && !c->agg_contents
	  && !known_value_ranges[c->operand_num].undefined_p ()
	  && !known_value_ranges[c->operand_num].varying_p ()
	  && TYPE_SIZE (c->type)
	     == TYPE_SIZE (known_value_ranges[c->operand_num].type ())
	  && (!val || TREE_CODE (val) != INTEGER_CST))
	{
	  value_range vr = known_value_ranges[c->operand_num];
	  if (!useless_type_conversion_p (c->type, vr.type ()))
	    {
	      value_range res;
	      range_fold_unary_expr (&res, NOP_EXPR,
				     c->type, &vr, vr.type ());
	      vr = res;
	    }
	  tree type = c->type;

	  for (j = 0; vec_safe_iterate (c->param_ops, j, &op); j++)
	    {
	      if (vr.varying_p () || vr.undefined_p ())
		break;

	      value_range res;
	      if (!op->val[0])
		range_fold_unary_expr (&res, op->code, op->type, &vr, type);
	      else if (!op->val[1])
		{
		  value_range op0 (op->val[0], op->val[0]);
		  range_fold_binary_expr (&res, op->code, op->type,
					  op->index ? &op0 : &vr,
					  op->index ? &vr : &op0);
		}
	      else
		gcc_unreachable ();
	      type = op->type;
	      vr = res;
	    }
	  if (!vr.varying_p () && !vr.undefined_p ())
	    {
	      value_range res;
	      value_range val_vr (c->val, c->val);
	      range_fold_binary_expr (&res, c->code, boolean_type_node,
				      &vr, &val_vr);
	      /* Condition proved false by the range; do not set its bit.  */
	      if (res.zero_p ())
		continue;
	    }
	}

      clause |= 1 << (i + predicate::first_dynamic_condition);
      nonspec_clause |= 1 << (i + predicate::first_dynamic_condition);
    }
  *ret_clause = clause;
  if (ret_nonspec_clause)
    *ret_nonspec_clause = nonspec_clause;
}

/* Return true if VRP will be executed on the function.
   We do not want to anticipate optimizations that will not happen.

   FIXME: This can be confused with -fdisable and debug counters and thus
   it should not be used for correctness (only to make heuristics work).
This means that inliner should do its own optimizations of expressions that it predicts to be constant so wrong code can not be triggered by builtin_constant_p. */ static bool vrp_will_run_p (struct cgraph_node *node) { return (opt_for_fn (node->decl, optimize) && !opt_for_fn (node->decl, optimize_debug) && opt_for_fn (node->decl, flag_tree_vrp)); } /* Similarly about FRE. */ static bool fre_will_run_p (struct cgraph_node *node) { return (opt_for_fn (node->decl, optimize) && !opt_for_fn (node->decl, optimize_debug) && opt_for_fn (node->decl, flag_tree_fre)); } /* Work out what conditions might be true at invocation of E. Compute costs for inlined edge if INLINE_P is true. Return in CLAUSE_PTR the evaluated conditions and in NONSPEC_CLAUSE_PTR (if non-NULL) conditions evaluated for nonspecialized clone called in a given context. KNOWN_VALS_PTR and KNOWN_AGGS_PTR must be non-NULL and will be filled by known constant and aggregate values of parameters. KNOWN_CONTEXT_PTR, if non-NULL, will be filled by polymorphic call contexts of parameter used by a polymorphic call. */ void evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p, clause_t *clause_ptr, clause_t *nonspec_clause_ptr, vec<tree> *known_vals_ptr, vec<ipa_polymorphic_call_context> *known_contexts_ptr, vec<ipa_agg_value_set> *known_aggs_ptr) { struct cgraph_node *callee = e->callee->ultimate_alias_target (); class ipa_fn_summary *info = ipa_fn_summaries->get (callee); auto_vec<value_range, 32> known_value_ranges; class ipa_edge_args *args; if (clause_ptr) *clause_ptr = inline_p ? 
0 : 1 << predicate::not_inlined_condition; if (ipa_node_params_sum && !e->call_stmt_cannot_inline_p && (info->conds || known_contexts_ptr) && (args = IPA_EDGE_REF (e)) != NULL) { struct cgraph_node *caller; class ipa_node_params *caller_parms_info, *callee_pi = NULL; class ipa_call_summary *es = ipa_call_summaries->get (e); int i, count = ipa_get_cs_argument_count (args); if (count) { if (e->caller->inlined_to) caller = e->caller->inlined_to; else caller = e->caller; caller_parms_info = IPA_NODE_REF (caller); callee_pi = IPA_NODE_REF (callee); /* Watch for thunks. */ if (callee_pi) /* Watch for variadic functions. */ count = MIN (count, ipa_get_param_count (callee_pi)); } if (callee_pi) for (i = 0; i < count; i++) { struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i); if (ipa_is_param_used_by_indirect_call (callee_pi, i) || ipa_is_param_used_by_ipa_predicates (callee_pi, i)) { /* Determine if we know constant value of the parameter. */ tree cst = ipa_value_from_jfunc (caller_parms_info, jf, ipa_get_type (callee_pi, i)); if (!cst && e->call_stmt && i < (int)gimple_call_num_args (e->call_stmt)) { cst = gimple_call_arg (e->call_stmt, i); if (!is_gimple_min_invariant (cst)) cst = NULL; } if (cst) { gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO); if (!known_vals_ptr->length ()) vec_safe_grow_cleared (known_vals_ptr, count); (*known_vals_ptr)[i] = cst; } else if (inline_p && !es->param[i].change_prob) { if (!known_vals_ptr->length ()) vec_safe_grow_cleared (known_vals_ptr, count); (*known_vals_ptr)[i] = error_mark_node; } /* If we failed to get simple constant, try value range. 
*/ if ((!cst || TREE_CODE (cst) != INTEGER_CST) && vrp_will_run_p (caller) && ipa_is_param_used_by_ipa_predicates (callee_pi, i)) { value_range vr = ipa_value_range_from_jfunc (caller_parms_info, e, jf, ipa_get_type (callee_pi, i)); if (!vr.undefined_p () && !vr.varying_p ()) { if (!known_value_ranges.length ()) known_value_ranges.safe_grow_cleared (count); known_value_ranges[i] = vr; } } /* Determine known aggregate values. */ if (fre_will_run_p (caller)) { ipa_agg_value_set agg = ipa_agg_value_set_from_jfunc (caller_parms_info, caller, &jf->agg); if (agg.items.length ()) { if (!known_aggs_ptr->length ()) vec_safe_grow_cleared (known_aggs_ptr, count); (*known_aggs_ptr)[i] = agg; } } } /* For calls used in polymorphic calls we further determine polymorphic call context. */ if (known_contexts_ptr && ipa_is_param_used_by_polymorphic_call (callee_pi, i)) { ipa_polymorphic_call_context ctx = ipa_context_from_jfunc (caller_parms_info, e, i, jf); if (!ctx.useless_p ()) { if (!known_contexts_ptr->length ()) known_contexts_ptr->safe_grow_cleared (count); (*known_contexts_ptr)[i] = ipa_context_from_jfunc (caller_parms_info, e, i, jf); } } } else gcc_assert (!count || callee->thunk.thunk_p); } else if (e->call_stmt && !e->call_stmt_cannot_inline_p && info->conds) { int i, count = (int)gimple_call_num_args (e->call_stmt); for (i = 0; i < count; i++) { tree cst = gimple_call_arg (e->call_stmt, i); if (!is_gimple_min_invariant (cst)) cst = NULL; if (cst) { if (!known_vals_ptr->length ()) vec_safe_grow_cleared (known_vals_ptr, count); (*known_vals_ptr)[i] = cst; } } } evaluate_conditions_for_known_args (callee, inline_p, *known_vals_ptr, known_value_ranges, *known_aggs_ptr, clause_ptr, nonspec_clause_ptr); } /* Allocate the function summary. 
 */

static void
ipa_fn_summary_alloc (void)
{
  gcc_checking_assert (!ipa_fn_summaries);
  ipa_size_summaries = new ipa_size_summary_t (symtab);
  ipa_fn_summaries = ipa_fn_summary_t::create_ggc (symtab);
  ipa_call_summaries = new ipa_call_summary_t (symtab);
}

ipa_call_summary::~ipa_call_summary ()
{
  if (predicate)
    edge_predicate_pool.remove (predicate);
  param.release ();
}

ipa_fn_summary::~ipa_fn_summary ()
{
  if (loop_iterations)
    edge_predicate_pool.remove (loop_iterations);
  if (loop_stride)
    edge_predicate_pool.remove (loop_stride);
  vec_free (conds);
  vec_free (size_time_table);
  vec_free (call_size_time_table);
}

/* Drop the call summaries of all direct and indirect calls of NODE.  */

void
ipa_fn_summary_t::remove_callees (cgraph_node *node)
{
  cgraph_edge *e;
  for (e = node->callees; e; e = e->next_callee)
    ipa_call_summaries->remove (e);
  for (e = node->indirect_calls; e; e = e->next_callee)
    ipa_call_summaries->remove (e);
}

/* Same as remap_predicate_after_duplication but handle hint predicate *P.
   Additionally care about allocating new memory slot for updated predicate
   and set it to NULL when it becomes true or false (and thus uninteresting).
 */

static void
remap_hint_predicate_after_duplication (predicate **p,
					clause_t possible_truths)
{
  predicate new_predicate;

  if (!*p)
    return;

  new_predicate = (*p)->remap_after_duplication (possible_truths);
  /* We do not want to free previous predicate; it is used by node
     origin.  */
  *p = NULL;
  set_hint_predicate (p, new_predicate);
}

/* Hook that is called by cgraph.c when a node is duplicated.  */

void
ipa_fn_summary_t::duplicate (cgraph_node *src,
			     cgraph_node *dst,
			     ipa_fn_summary *,
			     ipa_fn_summary *info)
{
  /* Placement-new copy-construct the destination summary from SRC's.  */
  new (info) ipa_fn_summary (*ipa_fn_summaries->get (src));
  /* TODO: as an optimization, we may avoid copying conditions
     that are known to be false or true.  */
  info->conds = vec_safe_copy (info->conds);

  /* When there are any replacements in the function body, see if we can
     figure out that something was optimized out.  */
  if (ipa_node_params_sum && dst->clone.tree_map)
    {
      vec<size_time_entry, va_gc> *entry = info->size_time_table;
      /* Use SRC parm info since it may not be copied yet.  */
      class ipa_node_params *parms_info = IPA_NODE_REF (src);
      vec<tree> known_vals = vNULL;
      int count = ipa_get_param_count (parms_info);
      int i, j;
      clause_t possible_truths;
      predicate true_pred = true;
      size_time_entry *e;
      int optimized_out_size = 0;
      bool inlined_to_p = false;
      struct cgraph_edge *edge, *next;

      info->size_time_table = 0;
      known_vals.safe_grow_cleared (count);
      /* Collect the replacement values recorded for the clone.  */
      for (i = 0; i < count; i++)
	{
	  struct ipa_replace_map *r;

	  for (j = 0; vec_safe_iterate (dst->clone.tree_map, j, &r); j++)
	    {
	      if (r->parm_num == i)
		{
		  known_vals[i] = r->new_tree;
		  break;
		}
	    }
	}
      evaluate_conditions_for_known_args (dst, false,
					  known_vals,
					  vNULL,
					  vNULL,
					  &possible_truths,
					  /* We are going to specialize,
					     so ignore nonspec truths.  */
					  NULL);
      known_vals.release ();

      info->account_size_time (0, 0, true_pred, true_pred);

      /* Remap size_time vectors.
	 Simplify the predicate by pruning out alternatives that are known
	 to be false.
	 TODO: as on optimization, we can also eliminate conditions known
	 to be true.  */
      for (i = 0; vec_safe_iterate (entry, i, &e); i++)
	{
	  predicate new_exec_pred;
	  predicate new_nonconst_pred;
	  new_exec_pred
	    = e->exec_predicate.remap_after_duplication (possible_truths);
	  new_nonconst_pred
	    = e->nonconst_predicate.remap_after_duplication (possible_truths);
	  if (new_exec_pred == false || new_nonconst_pred == false)
	    optimized_out_size += e->size;
	  else
	    info->account_size_time (e->size, e->time, new_exec_pred,
				     new_nonconst_pred);
	}

      /* Remap edge predicates with the same simplification as above.
	 Also copy constantness arrays.  */
      for (edge = dst->callees; edge; edge = next)
	{
	  predicate new_predicate;
	  class ipa_call_summary *es = ipa_call_summaries->get (edge);
	  /* edge_set_predicate may redirect EDGE; remember the successor
	     up front.  */
	  next = edge->next_callee;

	  if (!edge->inline_failed)
	    inlined_to_p = true;
	  if (!es->predicate)
	    continue;
	  new_predicate
	    = es->predicate->remap_after_duplication (possible_truths);
	  if (new_predicate == false && *es->predicate != false)
	    optimized_out_size
	      += es->call_stmt_size * ipa_fn_summary::size_scale;
	  edge_set_predicate (edge, &new_predicate);
	}

      /* Remap indirect edge predicates with the same simplification as above.
	 Also copy constantness arrays.  */
      for (edge = dst->indirect_calls; edge; edge = next)
	{
	  predicate new_predicate;
	  class ipa_call_summary *es = ipa_call_summaries->get (edge);
	  next = edge->next_callee;

	  gcc_checking_assert (edge->inline_failed);
	  if (!es->predicate)
	    continue;
	  new_predicate
	    = es->predicate->remap_after_duplication (possible_truths);
	  if (new_predicate == false && *es->predicate != false)
	    optimized_out_size
	      += es->call_stmt_size * ipa_fn_summary::size_scale;
	  edge_set_predicate (edge, &new_predicate);
	}
      remap_hint_predicate_after_duplication (&info->loop_iterations,
					      possible_truths);
      remap_hint_predicate_after_duplication (&info->loop_stride,
					      possible_truths);

      /* If inliner or someone after inliner will ever start producing
	 non-trivial clones, we will get trouble with lack of information
	 about updating self sizes, because size vectors already contains
	 sizes of the callees.  */
      gcc_assert (!inlined_to_p || !optimized_out_size);
    }
  else
    {
      info->size_time_table = vec_safe_copy (info->size_time_table);
      /* Hint predicates must get fresh pool slots; NULL them first so
	 set_hint_predicate does not free the origin's copies.  */
      if (info->loop_iterations)
	{
	  predicate p = *info->loop_iterations;
	  info->loop_iterations = NULL;
	  set_hint_predicate (&info->loop_iterations, p);
	}
      if (info->loop_stride)
	{
	  predicate p = *info->loop_stride;
	  info->loop_stride = NULL;
	  set_hint_predicate (&info->loop_stride, p);
	}
    }
  if (!dst->inlined_to)
    ipa_update_overall_fn_summary (dst);
}

/* Hook that is called by cgraph.c when a node is duplicated.
 */

void
ipa_call_summary_t::duplicate (struct cgraph_edge *src,
			       struct cgraph_edge *dst,
			       class ipa_call_summary *srcinfo,
			       class ipa_call_summary *info)
{
  new (info) ipa_call_summary (*srcinfo);
  /* Clear before edge_set_predicate so the copy gets its own pool slot.  */
  info->predicate = NULL;
  edge_set_predicate (dst, srcinfo->predicate);
  info->param = srcinfo->param.copy ();
  /* An indirect edge devirtualized into a direct one: drop the extra cost
     of the indirect call.  */
  if (!dst->indirect_unknown_callee && src->indirect_unknown_callee)
    {
      info->call_stmt_size -= (eni_size_weights.indirect_call_cost
			       - eni_size_weights.call_cost);
      info->call_stmt_time -= (eni_time_weights.indirect_call_cost
			       - eni_time_weights.call_cost);
    }
}

/* Dump edge summaries associated to NODE and recursively to all clones.
   Indent by INDENT.  */

static void
dump_ipa_call_summary (FILE *f, int indent, struct cgraph_node *node,
		       class ipa_fn_summary *info)
{
  struct cgraph_edge *edge;
  for (edge = node->callees; edge; edge = edge->next_callee)
    {
      class ipa_call_summary *es = ipa_call_summaries->get (edge);
      struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
      int i;

      fprintf (f,
	       "%*s%s %s\n%*s freq:%4.2f",
	       indent, "", callee->dump_name (),
	       !edge->inline_failed
	       ? "inlined"
	       : cgraph_inline_failed_string (edge->inline_failed),
	       indent, "", edge->sreal_frequency ().to_double ());

      if (cross_module_call_p (edge))
	fprintf (f, " cross module");

      if (es)
	fprintf (f, " loop depth:%2i size:%2i time: %2i",
		 es->loop_depth, es->call_stmt_size, es->call_stmt_time);

      ipa_fn_summary *s = ipa_fn_summaries->get (callee);
      ipa_size_summary *ss = ipa_size_summaries->get (callee);
      if (s != NULL)
	fprintf (f, " callee size:%2i stack:%2i",
		 (int) (ss->size / ipa_fn_summary::size_scale),
		 (int) s->estimated_stack_size);

      if (es && es->predicate)
	{
	  fprintf (f, " predicate: ");
	  es->predicate->dump (f, info->conds);
	}
      else
	fprintf (f, "\n");
      if (es && es->param.exists ())
	for (i = 0; i < (int) es->param.length (); i++)
	  {
	    int prob = es->param[i].change_prob;

	    if (!prob)
	      fprintf (f, "%*s op%i is compile time invariant\n",
		       indent + 2, "", i);
	    else if (prob != REG_BR_PROB_BASE)
	      fprintf (f, "%*s op%i change %f%% of time\n", indent + 2, "", i,
		       prob * 100.0 / REG_BR_PROB_BASE);
	  }
      /* Recurse into already-inlined callees.  */
      if (!edge->inline_failed)
	{
	  ipa_size_summary *ss = ipa_size_summaries->get (callee);
	  fprintf (f, "%*sStack frame offset %i, callee self size %i\n",
		   indent + 2, "",
		   (int) ipa_get_stack_frame_offset (callee),
		   (int) ss->estimated_self_stack_size);
	  dump_ipa_call_summary (f, indent + 2, callee, info);
	}
    }
  for (edge = node->indirect_calls; edge; edge = edge->next_callee)
    {
      class ipa_call_summary *es = ipa_call_summaries->get (edge);
      fprintf (f, "%*sindirect call loop depth:%2i freq:%4.2f size:%2i"
	       " time: %2i",
	       indent, "",
	       es->loop_depth,
	       edge->sreal_frequency ().to_double (), es->call_stmt_size,
	       es->call_stmt_time);
      if (es->predicate)
	{
	  fprintf (f, "predicate: ");
	  es->predicate->dump (f, info->conds);
	}
      else
	fprintf (f, "\n");
    }
}

/* Dump function summary of NODE to F, including its size/time table and
   hint predicates.  */

void
ipa_dump_fn_summary (FILE *f, struct cgraph_node *node)
{
  if (node->definition)
    {
      class ipa_fn_summary *s = ipa_fn_summaries->get (node);
      class ipa_size_summary *ss = ipa_size_summaries->get (node);
      if (s != NULL)
	{
	  size_time_entry *e;
	  int i;
	  fprintf (f, "IPA function summary for %s", node->dump_name ());
	  if (DECL_DISREGARD_INLINE_LIMITS (node->decl))
	    fprintf (f, " always_inline");
	  if (s->inlinable)
	    fprintf (f, " inlinable");
	  if (s->fp_expressions)
	    fprintf (f, " fp_expression");
	  fprintf (f, "\n global time: %f\n", s->time.to_double ());
	  fprintf (f, " self size: %i\n", ss->self_size);
	  fprintf (f, " global size: %i\n", ss->size);
	  fprintf (f, " min size: %i\n", s->min_size);
	  fprintf (f, " self stack: %i\n",
		   (int) ss->estimated_self_stack_size);
	  fprintf (f, " global stack: %i\n", (int) s->estimated_stack_size);
	  if (s->growth)
	    fprintf (f, " estimated growth:%i\n", (int) s->growth);
	  if (s->scc_no)
	    fprintf (f, " In SCC: %i\n", (int) s->scc_no);
	  for (i = 0; vec_safe_iterate (s->size_time_table, i, &e); i++)
	    {
	      fprintf (f, " size:%f, time:%f",
		       (double) e->size / ipa_fn_summary::size_scale,
		       e->time.to_double ());
	      if (e->exec_predicate != true)
		{
		  fprintf (f, ", executed if:");
		  e->exec_predicate.dump (f, s->conds, 0);
		}
	      if (e->exec_predicate != e->nonconst_predicate)
		{
		  fprintf (f, ", nonconst if:");
		  e->nonconst_predicate.dump (f, s->conds, 0);
		}
	      fprintf (f, "\n");
	    }
	  if (s->loop_iterations)
	    {
	      fprintf (f, " loop iterations:");
	      s->loop_iterations->dump (f, s->conds);
	    }
	  if (s->loop_stride)
	    {
	      fprintf (f, " loop stride:");
	      s->loop_stride->dump (f, s->conds);
	    }
	  fprintf (f, " calls:\n");
	  dump_ipa_call_summary (f, 4, node, s);
	  fprintf (f, "\n");
	}
      else
	fprintf (f, "IPA summary for %s is missing.\n", node->dump_name ());
    }
}

DEBUG_FUNCTION void
ipa_debug_fn_summary (struct cgraph_node *node)
{
  ipa_dump_fn_summary (stderr, node);
}

/* Dump summaries of all defined functions that are not inline clones.  */

void
ipa_dump_fn_summaries (FILE *f)
{
  struct cgraph_node *node;

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->inlined_to)
      ipa_dump_fn_summary (f, node);
}

/* Callback of walk_aliased_vdefs.  Flags that it has been invoked to the
   boolean variable pointed to by DATA.
 */

static bool
mark_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef ATTRIBUTE_UNUSED,
	       void *data)
{
  bool *b = (bool *) data;
  *b = true;
  /* Returning true terminates the walk after the first clobber.  */
  return true;
}

/* If OP refers to value of function parameter, return the corresponding
   parameter.  If non-NULL, the size of the memory load (or the SSA_NAME of
   the PARM_DECL) will be stored to *SIZE_P in that case too.  */

static tree
unmodified_parm_1 (ipa_func_body_info *fbi, gimple *stmt, tree op,
		   poly_int64 *size_p)
{
  /* SSA_NAME referring to parm default def?  */
  if (TREE_CODE (op) == SSA_NAME
      && SSA_NAME_IS_DEFAULT_DEF (op)
      && TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
    {
      if (size_p)
	*size_p = tree_to_poly_int64 (TYPE_SIZE (TREE_TYPE (op)));
      return SSA_NAME_VAR (op);
    }
  /* Non-SSA parm reference?  */
  if (TREE_CODE (op) == PARM_DECL)
    {
      bool modified = false;

      ao_ref refd;
      ao_ref_init (&refd, op);
      /* Walk virtual definitions reaching STMT to check whether the
	 parameter could have been stored to since function entry.  */
      int walked = walk_aliased_vdefs (&refd, gimple_vuse (stmt),
				       mark_modified, &modified, NULL, NULL,
				       fbi->aa_walk_budget + 1);
      if (walked < 0)
	{
	  /* Ran out of alias-analysis budget; give up for the rest of the
	     function body too.  */
	  fbi->aa_walk_budget = 0;
	  return NULL_TREE;
	}
      if (!modified)
	{
	  if (size_p)
	    *size_p = tree_to_poly_int64 (TYPE_SIZE (TREE_TYPE (op)));
	  return op;
	}
    }
  return NULL_TREE;
}

/* If OP refers to value of function parameter, return the corresponding
   parameter.  Also traverse chains of SSA register assignments.  If non-NULL,
   the size of the memory load (or the SSA_NAME of the PARM_DECL) will be
   stored to *SIZE_P in that case too.
 */

static tree
unmodified_parm (ipa_func_body_info *fbi, gimple *stmt, tree op,
		 poly_int64 *size_p)
{
  tree res = unmodified_parm_1 (fbi, stmt, op, size_p);
  if (res)
    return res;

  /* Follow simple SSA copies back towards a parameter.  */
  if (TREE_CODE (op) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (op)
      && gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
    return unmodified_parm (fbi, SSA_NAME_DEF_STMT (op),
			    gimple_assign_rhs1 (SSA_NAME_DEF_STMT (op)),
			    size_p);
  return NULL_TREE;
}

/* If OP refers to a value of a function parameter or value loaded from an
   aggregate passed to a parameter (either by value or reference), return TRUE
   and store the number of the parameter to *INDEX_P, the access size into
   *SIZE_P, and information whether and how it has been loaded from an
   aggregate into *AGGPOS.  INFO describes the function parameters, STMT is
   the statement in which OP is used or loaded.  */

static bool
unmodified_parm_or_parm_agg_item (struct ipa_func_body_info *fbi,
				  gimple *stmt, tree op, int *index_p,
				  poly_int64 *size_p,
				  struct agg_position_info *aggpos)
{
  tree res = unmodified_parm_1 (fbi, stmt, op, size_p);

  gcc_checking_assert (aggpos);
  if (res)
    {
      /* Plain (non-aggregate) parameter reference.  */
      *index_p = ipa_get_param_decl_index (fbi->info, res);
      if (*index_p < 0)
	return false;
      aggpos->agg_contents = false;
      aggpos->by_ref = false;
      return true;
    }

  if (TREE_CODE (op) == SSA_NAME)
    {
      if (SSA_NAME_IS_DEFAULT_DEF (op)
	  || !gimple_assign_single_p (SSA_NAME_DEF_STMT (op)))
	return false;
      stmt = SSA_NAME_DEF_STMT (op);
      op = gimple_assign_rhs1 (stmt);
      /* A non-reference RHS is a plain copy; recurse on it.  */
      if (!REFERENCE_CLASS_P (op))
	return unmodified_parm_or_parm_agg_item (fbi, stmt, op, index_p,
						 size_p, aggpos);
    }

  /* Otherwise try to match a load from an aggregate passed to a
     parameter.  */
  aggpos->agg_contents = true;
  return ipa_load_from_parm_agg (fbi, fbi->info->descriptors,
				 stmt, op, index_p, &aggpos->offset,
				 size_p, &aggpos->by_ref);
}

/* See if statement might disappear after inlining.
   0 - means not eliminated
   1 - half of statements goes away
   2 - for sure it is eliminated.
   We are not terribly sophisticated, basically looking for simple
   abstraction penalty wrappers.
 */

static int
eliminated_by_inlining_prob (ipa_func_body_info *fbi, gimple *stmt)
{
  enum gimple_code code = gimple_code (stmt);
  enum tree_code rhs_code;

  if (!optimize)
    return 0;

  switch (code)
    {
    case GIMPLE_RETURN:
      return 2;
    case GIMPLE_ASSIGN:
      if (gimple_num_ops (stmt) != 2)
	return 0;

      rhs_code = gimple_assign_rhs_code (stmt);

      /* Casts of parameters, loads from parameters passed by reference
	 and stores to return value or parameters are often free after
	 inlining due to SRA and further combining.
	 Assume that half of statements goes away.  */
      if (CONVERT_EXPR_CODE_P (rhs_code)
	  || rhs_code == VIEW_CONVERT_EXPR
	  || rhs_code == ADDR_EXPR
	  || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
	{
	  tree rhs = gimple_assign_rhs1 (stmt);
	  tree lhs = gimple_assign_lhs (stmt);
	  tree inner_rhs = get_base_address (rhs);
	  tree inner_lhs = get_base_address (lhs);
	  bool rhs_free = false;
	  bool lhs_free = false;

	  if (!inner_rhs)
	    inner_rhs = rhs;
	  if (!inner_lhs)
	    inner_lhs = lhs;

	  /* Reads of parameter are expected to be free.  */
	  if (unmodified_parm (fbi, stmt, inner_rhs, NULL))
	    rhs_free = true;
	  /* Match expressions of form &this->field.  Those will most likely
	     combine with something upstream after inlining.  */
	  else if (TREE_CODE (inner_rhs) == ADDR_EXPR)
	    {
	      tree op = get_base_address (TREE_OPERAND (inner_rhs, 0));
	      if (TREE_CODE (op) == PARM_DECL)
		rhs_free = true;
	      else if (TREE_CODE (op) == MEM_REF
		       && unmodified_parm (fbi, stmt, TREE_OPERAND (op, 0),
					   NULL))
		rhs_free = true;
	    }

	  /* When parameter is not SSA register because its address is taken
	     and it is just copied into one, the statement will be completely
	     free after inlining (we will copy propagate backward).  */
	  if (rhs_free && is_gimple_reg (lhs))
	    return 2;

	  /* Reads of parameters passed by reference
	     expected to be free (i.e. optimized out after inlining).  */
	  if (TREE_CODE (inner_rhs) == MEM_REF
	      && unmodified_parm (fbi, stmt, TREE_OPERAND (inner_rhs, 0),
				  NULL))
	    rhs_free = true;

	  /* Copying parameter passed by reference into gimple register is
	     probably also going to copy propagate, but we can't be quite
	     sure.  */
	  if (rhs_free && is_gimple_reg (lhs))
	    lhs_free = true;

	  /* Writes to parameters, parameters passed by value and return value
	     (either directly or passed via invisible reference) are free.

	     TODO: We ought to handle testcase like
	     struct a {int a,b;};
	     struct a
	     returnstruct (void)
	     {
	       struct a a ={1,2};
	       return a;
	     }

	     This translate into:

	     returnstruct ()
	     {
	       int a$b;
	       int a$a;
	       struct a a;
	       struct a D.2739;

	     <bb 2>:
	       D.2739.a = 1;
	       D.2739.b = 2;
	       return D.2739;

	     }
	     For that we either need to copy ipa-split logic detecting writes
	     to return value.  */
	  if (TREE_CODE (inner_lhs) == PARM_DECL
	      || TREE_CODE (inner_lhs) == RESULT_DECL
	      || (TREE_CODE (inner_lhs) == MEM_REF
		  && (unmodified_parm (fbi, stmt,
				       TREE_OPERAND (inner_lhs, 0), NULL)
		      || (TREE_CODE (TREE_OPERAND (inner_lhs, 0)) == SSA_NAME
			  && SSA_NAME_VAR (TREE_OPERAND (inner_lhs, 0))
			  && TREE_CODE (SSA_NAME_VAR (TREE_OPERAND
						      (inner_lhs, 0)))
			     == RESULT_DECL))))
	    lhs_free = true;
	  if (lhs_free
	      && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
	    rhs_free = true;
	  /* Both sides are free only half the time - SRA/combining may or
	     may not fire.  */
	  if (lhs_free && rhs_free)
	    return 1;
	}
      return 0;
    default:
      return 0;
    }
}

/* Analyze EXPR if it represents a series of simple operations performed on
   a function parameter and return true if so.  FBI, STMT, EXPR, INDEX_P and
   AGGPOS have the same meaning like in unmodified_parm_or_parm_agg_item.
   Type of the parameter or load from an aggregate via the parameter is stored
   in *TYPE_P.  Operations on the parameter are recorded to PARAM_OPS_P if it
   is not NULL.
 */

static bool
decompose_param_expr (struct ipa_func_body_info *fbi,
		      gimple *stmt, tree expr,
		      int *index_p, tree *type_p,
		      struct agg_position_info *aggpos,
		      expr_eval_ops *param_ops_p = NULL)
{
  int op_limit = opt_for_fn (fbi->node->decl, param_ipa_max_param_expr_ops);
  int op_count = 0;

  if (param_ops_p)
    *param_ops_p = NULL;

  /* Walk backwards from EXPR through its defining statements until a
     parameter (or aggregate load from one) is reached, recording each
     single-nonconstant-operand operation on the way.  */
  while (true)
    {
      expr_eval_op eval_op;
      unsigned rhs_count;
      unsigned cst_count = 0;

      if (unmodified_parm_or_parm_agg_item (fbi, stmt, expr, index_p, NULL,
					    aggpos))
	{
	  tree type = TREE_TYPE (expr);

	  if (aggpos->agg_contents)
	    {
	      /* Stop if containing bit-field.  */
	      if (TREE_CODE (expr) == BIT_FIELD_REF
		  || contains_bitfld_component_ref_p (expr))
		break;
	    }

	  *type_p = type;
	  return true;
	}

      if (TREE_CODE (expr) != SSA_NAME || SSA_NAME_IS_DEFAULT_DEF (expr))
	break;

      if (!is_gimple_assign (stmt = SSA_NAME_DEF_STMT (expr)))
	break;

      switch (gimple_assign_rhs_class (stmt))
	{
	case GIMPLE_SINGLE_RHS:
	  expr = gimple_assign_rhs1 (stmt);
	  continue;

	case GIMPLE_UNARY_RHS:
	  rhs_count = 1;
	  break;

	case GIMPLE_BINARY_RHS:
	  rhs_count = 2;
	  break;

	case GIMPLE_TERNARY_RHS:
	  rhs_count = 3;
	  break;

	default:
	  goto fail;
	}

      /* Stop if expression is too complex.  */
      if (op_count++ == op_limit)
	break;

      if (param_ops_p)
	{
	  eval_op.code = gimple_assign_rhs_code (stmt);
	  eval_op.type = TREE_TYPE (gimple_assign_lhs (stmt));
	  eval_op.val[0] = NULL_TREE;
	  eval_op.val[1] = NULL_TREE;
	}

      expr = NULL_TREE;
      for (unsigned i = 0; i < rhs_count; i++)
	{
	  tree op = gimple_op (stmt, i + 1);

	  gcc_assert (op && !TYPE_P (op));
	  if (is_gimple_ip_invariant (op))
	    {
	      /* All operands constant means nothing depends on the
		 parameter; bail out.  */
	      if (++cst_count == rhs_count)
		goto fail;

	      eval_op.val[cst_count - 1] = op;
	    }
	  else if (!expr)
	    {
	      /* Found a non-constant operand, and record its index in rhs
		 operands.  */
	      eval_op.index = i;
	      expr = op;
	    }
	  else
	    {
	      /* Found more than one non-constant operands.  */
	      goto fail;
	    }
	}

      /* Prepend, so the recorded ops end up in evaluation order.  */
      if (param_ops_p)
	vec_safe_insert (*param_ops_p, 0, eval_op);
    }

  /* Failed to decompose, free resource and return.  */
fail:
  if (param_ops_p)
    vec_free (*param_ops_p);

  return false;
}

/* If BB ends by a conditional we can turn into predicates, attach
   corresponding predicates to the CFG edges.  */

static void
set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
				   class ipa_fn_summary *summary,
				   class ipa_node_params *params_summary,
				   basic_block bb)
{
  gimple *last;
  tree op, op2;
  int index;
  struct agg_position_info aggpos;
  enum tree_code code, inverted_code;
  edge e;
  edge_iterator ei;
  gimple *set_stmt;
  tree param_type;
  expr_eval_ops param_ops;

  last = last_stmt (bb);
  if (!last || gimple_code (last) != GIMPLE_COND)
    return;
  if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
    return;
  op = gimple_cond_lhs (last);

  if (decompose_param_expr (fbi, last, op, &index, &param_type, &aggpos,
			    &param_ops))
    {
      code = gimple_cond_code (last);
      inverted_code = invert_tree_comparison (code, HONOR_NANS (op));

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  enum tree_code this_code = (e->flags & EDGE_TRUE_VALUE
				      ? code : inverted_code);
	  /* invert_tree_comparison will return ERROR_MARK on FP
	     comparisons that are not EQ/NE instead of returning proper
	     unordered one.  Be sure it is not confused with NON_CONSTANT.

	     And if the edge's target is the final block of diamond CFG graph
	     of this conditional statement, we do not need to compute
	     predicate for the edge because the final block's predicate must
	     be at least as that of the first block of the statement.  */
	  if (this_code != ERROR_MARK
	      && !dominated_by_p (CDI_POST_DOMINATORS, bb, e->dest))
	    {
	      predicate p
		= add_condition (summary, params_summary, index,
				 param_type, &aggpos,
				 this_code, gimple_cond_rhs (last),
				 param_ops);
	      e->aux = edge_predicate_pool.allocate ();
	      *(predicate *) e->aux = p;
	    }
	}
      vec_free (param_ops);
    }

  if (TREE_CODE (op) != SSA_NAME)
    return;
  /* Special case
     if (builtin_constant_p (op))
     constant_code
     else
     nonconstant_code.
     Here we can predicate nonconstant_code.  We can't
     really handle constant_code since we have no predicate for this and
     also the constant code is not known to be optimized away when inliner
     doesn't see operand is constant.  Other optimizers might think
     otherwise.  */
  if (gimple_cond_code (last) != NE_EXPR
      || !integer_zerop (gimple_cond_rhs (last)))
    return;

  set_stmt = SSA_NAME_DEF_STMT (op);
  if (!gimple_call_builtin_p (set_stmt, BUILT_IN_CONSTANT_P)
      || gimple_call_num_args (set_stmt) != 1)
    return;
  op2 = gimple_call_arg (set_stmt, 0);
  if (!decompose_param_expr (fbi, set_stmt, op2, &index, &param_type,
			     &aggpos))
    return;
  /* The false edge is the nonconstant_code branch.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    if (e->flags & EDGE_FALSE_VALUE)
      {
	predicate p = add_condition (summary, params_summary, index,
				     param_type, &aggpos,
				     predicate::is_not_constant, NULL_TREE);
	e->aux = edge_predicate_pool.allocate ();
	*(predicate *) e->aux = p;
      }
}

/* If BB ends by a switch we can turn into predicates, attach corresponding
   predicates to the CFG edges.  */

static void
set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
				     class ipa_fn_summary *summary,
				     class ipa_node_params *params_summary,
				     basic_block bb)
{
  gimple *lastg;
  tree op;
  int index;
  struct agg_position_info aggpos;
  edge e;
  edge_iterator ei;
  size_t n;
  size_t case_idx;
  tree param_type;
  expr_eval_ops param_ops;

  lastg = last_stmt (bb);
  if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
    return;
  gswitch *last = as_a <gswitch *> (lastg);
  op = gimple_switch_index (last);
  if (!decompose_param_expr (fbi, last, op, &index, &param_type, &aggpos,
			     &param_ops))
    return;

  auto_vec<std::pair<tree, tree> > ranges;
  tree type = TREE_TYPE (op);
  int bound_limit = opt_for_fn (fbi->node->decl,
				param_ipa_max_switch_predicate_bounds);
  int bound_count = 0;
  wide_int vr_wmin, vr_wmax;
  value_range_kind vr_type = get_range_info (op, &vr_wmin, &vr_wmax);

  /* Start every successor with a false predicate; case edges OR their
     conditions in below.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      e->aux = edge_predicate_pool.allocate ();
      *(predicate *) e->aux = false;
    }

  e = gimple_switch_edge (cfun, last, 0);
  /* Set BOUND_COUNT to maximum count to bypass computing predicate for
     default case if its target basic block is in convergence point of all
     switch cases, which can be determined by checking whether it
     post-dominates the switch statement.  */
  if (dominated_by_p (CDI_POST_DOMINATORS, bb, e->dest))
    bound_count = INT_MAX;

  n = gimple_switch_num_labels (last);
  for (case_idx = 1; case_idx < n; ++case_idx)
    {
      tree cl = gimple_switch_label (last, case_idx);
      tree min = CASE_LOW (cl);
      tree max = CASE_HIGH (cl);
      predicate p;

      e = gimple_switch_edge (cfun, last, case_idx);

      /* The case value might not have same type as switch expression,
	 extend the value based on the expression type.  */
      if (TREE_TYPE (min) != type)
	min = wide_int_to_tree (type, wi::to_wide (min));

      if (!max)
	max = min;
      else if (TREE_TYPE (max) != type)
	max = wide_int_to_tree (type, wi::to_wide (max));

      /* The case's target basic block is in convergence point of all switch
	 cases, its predicate should be at least as that of the switch
	 statement.  */
      if (dominated_by_p (CDI_POST_DOMINATORS, bb, e->dest))
	p = true;
      else if (min == max)
	p = add_condition (summary, params_summary, index, param_type,
			   &aggpos, EQ_EXPR, min, param_ops);
      else
	{
	  predicate p1, p2;
	  p1 = add_condition (summary, params_summary, index, param_type,
			      &aggpos, GE_EXPR, min, param_ops);
	  p2 = add_condition (summary, params_summary,index, param_type,
			      &aggpos, LE_EXPR, max, param_ops);
	  p = p1 & p2;
	}
      *(class predicate *) e->aux
	= p.or_with (summary->conds, *(class predicate *) e->aux);

      /* If there are too many disjoint case ranges, predicate for default
	 case might become too complicated.  So add a limit here.  */
      if (bound_count > bound_limit)
	continue;

      bool new_range = true;

      if (!ranges.is_empty ())
	{
	  wide_int curr_wmin = wi::to_wide (min);
	  wide_int last_wmax = wi::to_wide (ranges.last ().second);

	  /* Merge case ranges if they are continuous.  */
	  if (curr_wmin == last_wmax + 1)
	    new_range = false;
	  else if (vr_type == VR_ANTI_RANGE)
	    {
	      /* If two disjoint case ranges can be connected by anti-range
		 of switch index, combine them to one range.  */
	      if (wi::lt_p (vr_wmax, curr_wmin - 1, TYPE_SIGN (type)))
		vr_type = VR_UNDEFINED;
	      else if (wi::le_p (vr_wmin, last_wmax + 1, TYPE_SIGN (type)))
		new_range = false;
	    }
	}

      /* Create/extend a case range.  And we count endpoints of range set,
	 this number nearly equals to number of conditions that we will create
	 for predicate of default case.  */
      if (new_range)
	{
	  bound_count += (min == max) ? 1 : 2;
	  ranges.safe_push (std::make_pair (min, max));
	}
      else
	{
	  bound_count += (ranges.last ().first == ranges.last ().second);
	  ranges.last ().second = max;
	}
    }

  e = gimple_switch_edge (cfun, last, 0);
  if (bound_count > bound_limit)
    {
      *(class predicate *) e->aux = true;
      vec_free (param_ops);
      return;
    }

  predicate p_seg = true;
  predicate p_all = false;

  if (vr_type != VR_RANGE)
    {
      vr_wmin = wi::to_wide (TYPE_MIN_VALUE (type));
      vr_wmax = wi::to_wide (TYPE_MAX_VALUE (type));
    }

  /* Construct predicate to represent default range set that is negation of
     all case ranges.  Case range is classified as containing single/non-single
     values.  Suppose a piece of case ranges in the following.

                [D1...D2]  [S1] ... [Sn]  [D3...D4]

     To represent default case's range sets between two non-single value
     case ranges (From D2 to D3), we construct predicate as:

              D2 < x < D3 && x != S1 && ... && x != Sn  */
  for (size_t i = 0; i < ranges.length (); i++)
    {
      tree min = ranges[i].first;
      tree max = ranges[i].second;

      if (min == max)
	p_seg &= add_condition (summary, params_summary, index,
				param_type, &aggpos, NE_EXPR,
				min, param_ops);
      else
	{
	  /* Do not create sub-predicate for range that is beyond low bound
	     of switch index.
*/ if (wi::lt_p (vr_wmin, wi::to_wide (min), TYPE_SIGN (type))) { p_seg &= add_condition (summary, params_summary, index, param_type, &aggpos, LT_EXPR, min, param_ops); p_all = p_all.or_with (summary->conds, p_seg); } /* Do not create sub-predicate for range that is beyond up bound of switch index. */ if (wi::le_p (vr_wmax, wi::to_wide (max), TYPE_SIGN (type))) { p_seg = false; break; } p_seg = add_condition (summary, params_summary, index, param_type, &aggpos, GT_EXPR, max, param_ops); } } p_all = p_all.or_with (summary->conds, p_seg); *(class predicate *) e->aux = p_all.or_with (summary->conds, *(class predicate *) e->aux); vec_free (param_ops); } /* For each BB in NODE attach to its AUX pointer predicate under which it is executable. */ static void compute_bb_predicates (struct ipa_func_body_info *fbi, struct cgraph_node *node, class ipa_fn_summary *summary, class ipa_node_params *params_summary) { struct function *my_function = DECL_STRUCT_FUNCTION (node->decl); bool done = false; basic_block bb; FOR_EACH_BB_FN (bb, my_function) { set_cond_stmt_execution_predicate (fbi, summary, params_summary, bb); set_switch_stmt_execution_predicate (fbi, summary, params_summary, bb); } /* Entry block is always executable. */ ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux = edge_predicate_pool.allocate (); *(predicate *) ENTRY_BLOCK_PTR_FOR_FN (my_function)->aux = true; /* A simple dataflow propagation of predicates forward in the CFG. TODO: work in reverse postorder. 
*/ while (!done) { done = true; FOR_EACH_BB_FN (bb, my_function) { predicate p = false; edge e; edge_iterator ei; FOR_EACH_EDGE (e, ei, bb->preds) { if (e->src->aux) { predicate this_bb_predicate = *(predicate *) e->src->aux; if (e->aux) this_bb_predicate &= (*(class predicate *) e->aux); p = p.or_with (summary->conds, this_bb_predicate); if (p == true) break; } } if (p != false) { basic_block pdom_bb; if (!bb->aux) { done = false; bb->aux = edge_predicate_pool.allocate (); *((predicate *) bb->aux) = p; } else if (p != *(predicate *) bb->aux) { /* This OR operation is needed to ensure monotonous data flow in the case we hit the limit on number of clauses and the and/or operations above give approximate answers. */ p = p.or_with (summary->conds, *(predicate *)bb->aux); if (p != *(predicate *) bb->aux) { done = false; *((predicate *) bb->aux) = p; } } /* For switch/if statement, we can OR-combine predicates of all its cases/branches to get predicate for basic block in their convergence point, but sometimes this will generate very complicated predicate. Actually, we can get simplified predicate in another way by using the fact that predicate for a basic block must also hold true for its post dominators. To be specific, basic block in convergence point of conditional statement should include predicate of the statement. */ pdom_bb = get_immediate_dominator (CDI_POST_DOMINATORS, bb); if (pdom_bb == EXIT_BLOCK_PTR_FOR_FN (my_function) || !pdom_bb) ; else if (!pdom_bb->aux) { done = false; pdom_bb->aux = edge_predicate_pool.allocate (); *((predicate *) pdom_bb->aux) = p; } else if (p != *(predicate *) pdom_bb->aux) { p = p.or_with (summary->conds, *(predicate *)pdom_bb->aux); if (p != *(predicate *) pdom_bb->aux) { done = false; *((predicate *) pdom_bb->aux) = p; } } } } } } /* Return predicate specifying when the STMT might have result that is not a compile time constant. 
*/ static predicate will_be_nonconstant_expr_predicate (ipa_func_body_info *fbi, class ipa_fn_summary *summary, class ipa_node_params *params_summary, tree expr, vec<predicate> nonconstant_names) { tree parm; int index; while (UNARY_CLASS_P (expr)) expr = TREE_OPERAND (expr, 0); parm = unmodified_parm (fbi, NULL, expr, NULL); if (parm && (index = ipa_get_param_decl_index (fbi->info, parm)) >= 0) return add_condition (summary, params_summary, index, TREE_TYPE (parm), NULL, predicate::changed, NULL_TREE); if (is_gimple_min_invariant (expr)) return false; if (TREE_CODE (expr) == SSA_NAME) return nonconstant_names[SSA_NAME_VERSION (expr)]; if (BINARY_CLASS_P (expr) || COMPARISON_CLASS_P (expr)) { predicate p1 = will_be_nonconstant_expr_predicate (fbi, summary, params_summary, TREE_OPERAND (expr, 0), nonconstant_names); if (p1 == true) return p1; predicate p2 = will_be_nonconstant_expr_predicate (fbi, summary, params_summary, TREE_OPERAND (expr, 1), nonconstant_names); return p1.or_with (summary->conds, p2); } else if (TREE_CODE (expr) == COND_EXPR) { predicate p1 = will_be_nonconstant_expr_predicate (fbi, summary, params_summary, TREE_OPERAND (expr, 0), nonconstant_names); if (p1 == true) return p1; predicate p2 = will_be_nonconstant_expr_predicate (fbi, summary, params_summary, TREE_OPERAND (expr, 1), nonconstant_names); if (p2 == true) return p2; p1 = p1.or_with (summary->conds, p2); p2 = will_be_nonconstant_expr_predicate (fbi, summary, params_summary, TREE_OPERAND (expr, 2), nonconstant_names); return p2.or_with (summary->conds, p1); } else if (TREE_CODE (expr) == CALL_EXPR) return true; else { debug_tree (expr); gcc_unreachable (); } return false; } /* Return predicate specifying when the STMT might have result that is not a compile time constant. 
*/ static predicate will_be_nonconstant_predicate (struct ipa_func_body_info *fbi, class ipa_fn_summary *summary, class ipa_node_params *params_summary, gimple *stmt, vec<predicate> nonconstant_names) { predicate p = true; ssa_op_iter iter; tree use; tree param_type = NULL_TREE; predicate op_non_const; bool is_load; int base_index; struct agg_position_info aggpos; /* What statements might be optimized away when their arguments are constant. */ if (gimple_code (stmt) != GIMPLE_ASSIGN && gimple_code (stmt) != GIMPLE_COND && gimple_code (stmt) != GIMPLE_SWITCH && (gimple_code (stmt) != GIMPLE_CALL || !(gimple_call_flags (stmt) & ECF_CONST))) return p; /* Stores will stay anyway. */ if (gimple_store_p (stmt)) return p; is_load = gimple_assign_load_p (stmt); /* Loads can be optimized when the value is known. */ if (is_load) { tree op = gimple_assign_rhs1 (stmt); if (!decompose_param_expr (fbi, stmt, op, &base_index, &param_type, &aggpos)) return p; } else base_index = -1; /* See if we understand all operands before we start adding conditionals. */ FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE) { tree parm = unmodified_parm (fbi, stmt, use, NULL); /* For arguments we can build a condition. */ if (parm && ipa_get_param_decl_index (fbi->info, parm) >= 0) continue; if (TREE_CODE (use) != SSA_NAME) return p; /* If we know when operand is constant, we still can say something useful. 
*/ if (nonconstant_names[SSA_NAME_VERSION (use)] != true) continue; return p; } if (is_load) op_non_const = add_condition (summary, params_summary, base_index, param_type, &aggpos, predicate::changed, NULL_TREE); else op_non_const = false; FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE) { tree parm = unmodified_parm (fbi, stmt, use, NULL); int index; if (parm && (index = ipa_get_param_decl_index (fbi->info, parm)) >= 0) { if (index != base_index) p = add_condition (summary, params_summary, index, TREE_TYPE (parm), NULL, predicate::changed, NULL_TREE); else continue; } else p = nonconstant_names[SSA_NAME_VERSION (use)]; op_non_const = p.or_with (summary->conds, op_non_const); } if ((gimple_code (stmt) == GIMPLE_ASSIGN || gimple_code (stmt) == GIMPLE_CALL) && gimple_op (stmt, 0) && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME) nonconstant_names[SSA_NAME_VERSION (gimple_op (stmt, 0))] = op_non_const; return op_non_const; } struct record_modified_bb_info { tree op; bitmap bb_set; gimple *stmt; }; /* Value is initialized in INIT_BB and used in USE_BB. We want to compute probability how often it changes between USE_BB. INIT_BB->count/USE_BB->count is an estimate, but if INIT_BB is in different loop nest, we can do better. This is all just estimate. In theory we look for minimal cut separating INIT_BB and USE_BB, but we only want to anticipate loop invariant motion anyway. */ static basic_block get_minimal_bb (basic_block init_bb, basic_block use_bb) { class loop *l = find_common_loop (init_bb->loop_father, use_bb->loop_father); if (l && l->header->count < init_bb->count) return l->header; return init_bb; } /* Callback of walk_aliased_vdefs. Records basic blocks where the value may be set except for info->stmt. 
*/ static bool record_modified (ao_ref *ao ATTRIBUTE_UNUSED, tree vdef, void *data) { struct record_modified_bb_info *info = (struct record_modified_bb_info *) data; if (SSA_NAME_DEF_STMT (vdef) == info->stmt) return false; if (gimple_clobber_p (SSA_NAME_DEF_STMT (vdef))) return false; bitmap_set_bit (info->bb_set, SSA_NAME_IS_DEFAULT_DEF (vdef) ? ENTRY_BLOCK_PTR_FOR_FN (cfun)->index : get_minimal_bb (gimple_bb (SSA_NAME_DEF_STMT (vdef)), gimple_bb (info->stmt))->index); if (dump_file) { fprintf (dump_file, " Param "); print_generic_expr (dump_file, info->op, TDF_SLIM); fprintf (dump_file, " changed at bb %i, minimal: %i stmt: ", gimple_bb (SSA_NAME_DEF_STMT (vdef))->index, get_minimal_bb (gimple_bb (SSA_NAME_DEF_STMT (vdef)), gimple_bb (info->stmt))->index); print_gimple_stmt (dump_file, SSA_NAME_DEF_STMT (vdef), 0); } return false; } /* Return probability (based on REG_BR_PROB_BASE) that I-th parameter of STMT will change since last invocation of STMT. Value 0 is reserved for compile time invariants. For common parameters it is REG_BR_PROB_BASE. For loop invariants it ought to be REG_BR_PROB_BASE / estimated_iters. */ static int param_change_prob (ipa_func_body_info *fbi, gimple *stmt, int i) { tree op = gimple_call_arg (stmt, i); basic_block bb = gimple_bb (stmt); if (TREE_CODE (op) == WITH_SIZE_EXPR) op = TREE_OPERAND (op, 0); tree base = get_base_address (op); /* Global invariants never change. */ if (is_gimple_min_invariant (base)) return 0; /* We would have to do non-trivial analysis to really work out what is the probability of value to change (i.e. when init statement is in a sibling loop of the call). We do an conservative estimate: when call is executed N times more often than the statement defining value, we take the frequency 1/N. 
*/ if (TREE_CODE (base) == SSA_NAME) { profile_count init_count; if (!bb->count.nonzero_p ()) return REG_BR_PROB_BASE; if (SSA_NAME_IS_DEFAULT_DEF (base)) init_count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count; else init_count = get_minimal_bb (gimple_bb (SSA_NAME_DEF_STMT (base)), gimple_bb (stmt))->count; if (init_count < bb->count) return MAX ((init_count.to_sreal_scale (bb->count) * REG_BR_PROB_BASE).to_int (), 1); return REG_BR_PROB_BASE; } else { ao_ref refd; profile_count max = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count; struct record_modified_bb_info info; tree init = ctor_for_folding (base); if (init != error_mark_node) return 0; if (!bb->count.nonzero_p ()) return REG_BR_PROB_BASE; if (dump_file) { fprintf (dump_file, " Analyzing param change probability of "); print_generic_expr (dump_file, op, TDF_SLIM); fprintf (dump_file, "\n"); } ao_ref_init (&refd, op); info.op = op; info.stmt = stmt; info.bb_set = BITMAP_ALLOC (NULL); int walked = walk_aliased_vdefs (&refd, gimple_vuse (stmt), record_modified, &info, NULL, NULL, fbi->aa_walk_budget); if (walked < 0 || bitmap_bit_p (info.bb_set, bb->index)) { if (dump_file) { if (walked < 0) fprintf (dump_file, " Ran out of AA walking budget.\n"); else fprintf (dump_file, " Set in same BB as used.\n"); } BITMAP_FREE (info.bb_set); return REG_BR_PROB_BASE; } bitmap_iterator bi; unsigned index; /* Lookup the most frequent update of the value and believe that it dominates all the other; precise analysis here is difficult. 
*/ EXECUTE_IF_SET_IN_BITMAP (info.bb_set, 0, index, bi) max = max.max (BASIC_BLOCK_FOR_FN (cfun, index)->count); if (dump_file) { fprintf (dump_file, " Set with count "); max.dump (dump_file); fprintf (dump_file, " and used with count "); bb->count.dump (dump_file); fprintf (dump_file, " freq %f\n", max.to_sreal_scale (bb->count).to_double ()); } BITMAP_FREE (info.bb_set); if (max < bb->count) return MAX ((max.to_sreal_scale (bb->count) * REG_BR_PROB_BASE).to_int (), 1); return REG_BR_PROB_BASE; } } /* Find whether a basic block BB is the final block of a (half) diamond CFG sub-graph and if the predicate the condition depends on is known. If so, return true and store the pointer the predicate in *P. */ static bool phi_result_unknown_predicate (ipa_func_body_info *fbi, ipa_fn_summary *summary, class ipa_node_params *params_summary, basic_block bb, predicate *p, vec<predicate> nonconstant_names) { edge e; edge_iterator ei; basic_block first_bb = NULL; gimple *stmt; if (single_pred_p (bb)) { *p = false; return true; } FOR_EACH_EDGE (e, ei, bb->preds) { if (single_succ_p (e->src)) { if (!single_pred_p (e->src)) return false; if (!first_bb) first_bb = single_pred (e->src); else if (single_pred (e->src) != first_bb) return false; } else { if (!first_bb) first_bb = e->src; else if (e->src != first_bb) return false; } } if (!first_bb) return false; stmt = last_stmt (first_bb); if (!stmt || gimple_code (stmt) != GIMPLE_COND || !is_gimple_ip_invariant (gimple_cond_rhs (stmt))) return false; *p = will_be_nonconstant_expr_predicate (fbi, summary, params_summary, gimple_cond_lhs (stmt), nonconstant_names); if (*p == true) return false; else return true; } /* Given a PHI statement in a function described by inline properties SUMMARY and *P being the predicate describing whether the selected PHI argument is known, store a predicate for the result of the PHI statement into NONCONSTANT_NAMES, if possible. 
*/ static void predicate_for_phi_result (class ipa_fn_summary *summary, gphi *phi, predicate *p, vec<predicate> nonconstant_names) { unsigned i; for (i = 0; i < gimple_phi_num_args (phi); i++) { tree arg = gimple_phi_arg (phi, i)->def; if (!is_gimple_min_invariant (arg)) { gcc_assert (TREE_CODE (arg) == SSA_NAME); *p = p->or_with (summary->conds, nonconstant_names[SSA_NAME_VERSION (arg)]); if (*p == true) return; } } if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "\t\tphi predicate: "); p->dump (dump_file, summary->conds); } nonconstant_names[SSA_NAME_VERSION (gimple_phi_result (phi))] = *p; } /* For a typical usage of __builtin_expect (a<b, 1), we may introduce an extra relation stmt: With the builtin, we have t1 = a <= b; t2 = (long int) t1; t3 = __builtin_expect (t2, 1); if (t3 != 0) goto ... Without the builtin, we have if (a<=b) goto... This affects the size/time estimation and may have an impact on the earlier inlining. Here find this pattern and fix it up later. 
*/ static gimple * find_foldable_builtin_expect (basic_block bb) { gimple_stmt_iterator bsi; for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi)) { gimple *stmt = gsi_stmt (bsi); if (gimple_call_builtin_p (stmt, BUILT_IN_EXPECT) || gimple_call_builtin_p (stmt, BUILT_IN_EXPECT_WITH_PROBABILITY) || gimple_call_internal_p (stmt, IFN_BUILTIN_EXPECT)) { tree var = gimple_call_lhs (stmt); tree arg = gimple_call_arg (stmt, 0); use_operand_p use_p; gimple *use_stmt; bool match = false; bool done = false; if (!var || !arg) continue; gcc_assert (TREE_CODE (var) == SSA_NAME); while (TREE_CODE (arg) == SSA_NAME) { gimple *stmt_tmp = SSA_NAME_DEF_STMT (arg); if (!is_gimple_assign (stmt_tmp)) break; switch (gimple_assign_rhs_code (stmt_tmp)) { case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: case EQ_EXPR: case NE_EXPR: match = true; done = true; break; CASE_CONVERT: break; default: done = true; break; } if (done) break; arg = gimple_assign_rhs1 (stmt_tmp); } if (match && single_imm_use (var, &use_p, &use_stmt) && gimple_code (use_stmt) == GIMPLE_COND) return use_stmt; } } return NULL; } /* Return true when the basic blocks contains only clobbers followed by RESX. Such BBs are kept around to make removal of dead stores possible with presence of EH and will be optimized out by optimize_clobbers later in the game. NEED_EH is used to recurse in case the clobber has non-EH predecessors that can be clobber only, too.. When it is false, the RESX is not necessary on the end of basic block. 
*/ static bool clobber_only_eh_bb_p (basic_block bb, bool need_eh = true) { gimple_stmt_iterator gsi = gsi_last_bb (bb); edge_iterator ei; edge e; if (need_eh) { if (gsi_end_p (gsi)) return false; if (gimple_code (gsi_stmt (gsi)) != GIMPLE_RESX) return false; gsi_prev (&gsi); } else if (!single_succ_p (bb)) return false; for (; !gsi_end_p (gsi); gsi_prev (&gsi)) { gimple *stmt = gsi_stmt (gsi); if (is_gimple_debug (stmt)) continue; if (gimple_clobber_p (stmt)) continue; if (gimple_code (stmt) == GIMPLE_LABEL) break; return false; } /* See if all predecessors are either throws or clobber only BBs. */ FOR_EACH_EDGE (e, ei, bb->preds) if (!(e->flags & EDGE_EH) && !clobber_only_eh_bb_p (e->src, false)) return false; return true; } /* Return true if STMT compute a floating point expression that may be affected by -ffast-math and similar flags. */ static bool fp_expression_p (gimple *stmt) { ssa_op_iter i; tree op; FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF|SSA_OP_USE) if (FLOAT_TYPE_P (TREE_TYPE (op))) return true; return false; } /* Analyze function body for NODE. EARLY indicates run from early optimization pipeline. */ static void analyze_function_body (struct cgraph_node *node, bool early) { sreal time = opt_for_fn (node->decl, param_uninlined_function_time); /* Estimate static overhead for function prologue/epilogue and alignment. */ int size = opt_for_fn (node->decl, param_uninlined_function_insns); /* Benefits are scaled by probability of elimination that is in range <0,2>. */ basic_block bb; struct function *my_function = DECL_STRUCT_FUNCTION (node->decl); sreal freq; class ipa_fn_summary *info = ipa_fn_summaries->get_create (node); class ipa_node_params *params_summary = early ? 
NULL : IPA_NODE_REF (node); predicate bb_predicate; struct ipa_func_body_info fbi; vec<predicate> nonconstant_names = vNULL; int nblocks, n; int *order; gimple *fix_builtin_expect_stmt; gcc_assert (my_function && my_function->cfg); gcc_assert (cfun == my_function); memset(&fbi, 0, sizeof(fbi)); vec_free (info->conds); info->conds = NULL; vec_free (info->size_time_table); info->size_time_table = NULL; /* When optimizing and analyzing for IPA inliner, initialize loop optimizer so we can produce proper inline hints. When optimizing and analyzing for early inliner, initialize node params so we can produce correct BB predicates. */ if (opt_for_fn (node->decl, optimize)) { calculate_dominance_info (CDI_DOMINATORS); calculate_dominance_info (CDI_POST_DOMINATORS); if (!early) loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS); else { ipa_check_create_node_params (); ipa_initialize_node_params (node); } if (ipa_node_params_sum) { fbi.node = node; fbi.info = IPA_NODE_REF (node); fbi.bb_infos = vNULL; fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun)); fbi.param_count = count_formal_params (node->decl); fbi.aa_walk_budget = opt_for_fn (node->decl, param_ipa_max_aa_steps); nonconstant_names.safe_grow_cleared (SSANAMES (my_function)->length ()); } } if (dump_file) fprintf (dump_file, "\nAnalyzing function body size: %s\n", node->dump_name ()); /* When we run into maximal number of entries, we assign everything to the constant truth case. Be sure to have it in list. 
*/ bb_predicate = true; info->account_size_time (0, 0, bb_predicate, bb_predicate); bb_predicate = predicate::not_inlined (); info->account_size_time (opt_for_fn (node->decl, param_uninlined_function_insns) * ipa_fn_summary::size_scale, opt_for_fn (node->decl, param_uninlined_function_time), bb_predicate, bb_predicate); if (fbi.info) compute_bb_predicates (&fbi, node, info, params_summary); order = XNEWVEC (int, n_basic_blocks_for_fn (cfun)); nblocks = pre_and_rev_post_order_compute (NULL, order, false); for (n = 0; n < nblocks; n++) { bb = BASIC_BLOCK_FOR_FN (cfun, order[n]); freq = bb->count.to_sreal_scale (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count); if (clobber_only_eh_bb_p (bb)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "\n Ignoring BB %i;" " it will be optimized away by cleanup_clobbers\n", bb->index); continue; } /* TODO: Obviously predicates can be propagated down across CFG. */ if (fbi.info) { if (bb->aux) bb_predicate = *(predicate *) bb->aux; else bb_predicate = false; } else bb_predicate = true; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "\n BB %i predicate:", bb->index); bb_predicate.dump (dump_file, info->conds); } if (fbi.info && nonconstant_names.exists ()) { predicate phi_predicate; bool first_phi = true; for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi)) { if (first_phi && !phi_result_unknown_predicate (&fbi, info, params_summary, bb, &phi_predicate, nonconstant_names)) break; first_phi = false; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, " "); print_gimple_stmt (dump_file, gsi_stmt (bsi), 0); } predicate_for_phi_result (info, bsi.phi (), &phi_predicate, nonconstant_names); } } fix_builtin_expect_stmt = find_foldable_builtin_expect (bb); for (gimple_stmt_iterator bsi = gsi_start_nondebug_bb (bb); !gsi_end_p (bsi); gsi_next_nondebug (&bsi)) { gimple *stmt = gsi_stmt (bsi); int this_size = estimate_num_insns (stmt, &eni_size_weights); int this_time = 
estimate_num_insns (stmt, &eni_time_weights); int prob; predicate will_be_nonconstant; /* This relation stmt should be folded after we remove __builtin_expect call. Adjust the cost here. */ if (stmt == fix_builtin_expect_stmt) { this_size--; this_time--; } if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, " "); print_gimple_stmt (dump_file, stmt, 0); fprintf (dump_file, "\t\tfreq:%3.2f size:%3i time:%3i\n", freq.to_double (), this_size, this_time); } if (is_gimple_call (stmt) && !gimple_call_internal_p (stmt)) { struct cgraph_edge *edge = node->get_edge (stmt); ipa_call_summary *es = ipa_call_summaries->get_create (edge); /* Special case: results of BUILT_IN_CONSTANT_P will be always resolved as constant. We however don't want to optimize out the cgraph edges. */ if (nonconstant_names.exists () && gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P) && gimple_call_lhs (stmt) && TREE_CODE (gimple_call_lhs (stmt)) == SSA_NAME) { predicate false_p = false; nonconstant_names[SSA_NAME_VERSION (gimple_call_lhs (stmt))] = false_p; } if (ipa_node_params_sum) { int count = gimple_call_num_args (stmt); int i; if (count) es->param.safe_grow_cleared (count); for (i = 0; i < count; i++) { int prob = param_change_prob (&fbi, stmt, i); gcc_assert (prob >= 0 && prob <= REG_BR_PROB_BASE); es->param[i].change_prob = prob; } } es->call_stmt_size = this_size; es->call_stmt_time = this_time; es->loop_depth = bb_loop_depth (bb); edge_set_predicate (edge, &bb_predicate); if (edge->speculative) { cgraph_edge *indirect = edge->speculative_call_indirect_edge (); ipa_call_summary *es2 = ipa_call_summaries->get_create (indirect); ipa_call_summaries->duplicate (edge, indirect, es, es2); /* Edge is the first direct call. create and duplicate call summaries for multiple speculative call targets. 
*/ for (cgraph_edge *direct = edge->next_speculative_call_target (); direct; direct = direct->next_speculative_call_target ()) { ipa_call_summary *es3 = ipa_call_summaries->get_create (direct); ipa_call_summaries->duplicate (edge, direct, es, es3); } } } /* TODO: When conditional jump or switch is known to be constant, but we did not translate it into the predicates, we really can account just maximum of the possible paths. */ if (fbi.info) will_be_nonconstant = will_be_nonconstant_predicate (&fbi, info, params_summary, stmt, nonconstant_names); else will_be_nonconstant = true; if (this_time || this_size) { sreal final_time = (sreal)this_time * freq; prob = eliminated_by_inlining_prob (&fbi, stmt); if (prob == 1 && dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "\t\t50%% will be eliminated by inlining\n"); if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "\t\tWill be eliminated by inlining\n"); class predicate p = bb_predicate & will_be_nonconstant; /* We can ignore statement when we proved it is never going to happen, but we cannot do that for call statements because edges are accounted specially. */ if (*(is_gimple_call (stmt) ? &bb_predicate : &p) != false) { time += final_time; size += this_size; } /* We account everything but the calls. Calls have their own size/time info attached to cgraph edges. This is necessary in order to make the cost disappear after inlining. */ if (!is_gimple_call (stmt)) { if (prob) { predicate ip = bb_predicate & predicate::not_inlined (); info->account_size_time (this_size * prob, (final_time * prob) / 2, ip, p); } if (prob != 2) info->account_size_time (this_size * (2 - prob), (final_time * (2 - prob) / 2), bb_predicate, p); } if (!info->fp_expressions && fp_expression_p (stmt)) { info->fp_expressions = true; if (dump_file) fprintf (dump_file, " fp_expression set\n"); } } /* Account cost of address calculations in the statements. 
*/ for (unsigned int i = 0; i < gimple_num_ops (stmt); i++) { for (tree op = gimple_op (stmt, i); op && handled_component_p (op); op = TREE_OPERAND (op, 0)) if ((TREE_CODE (op) == ARRAY_REF || TREE_CODE (op) == ARRAY_RANGE_REF) && TREE_CODE (TREE_OPERAND (op, 1)) == SSA_NAME) { predicate p = bb_predicate; if (fbi.info) p = p & will_be_nonconstant_expr_predicate (&fbi, info, params_summary, TREE_OPERAND (op, 1), nonconstant_names); if (p != false) { time += freq; size += 1; if (dump_file) fprintf (dump_file, "\t\tAccounting address calculation.\n"); info->account_size_time (ipa_fn_summary::size_scale, freq, bb_predicate, p); } } } } } free (order); if (nonconstant_names.exists () && !early) { class loop *loop; predicate loop_iterations = true; predicate loop_stride = true; if (dump_file && (dump_flags & TDF_DETAILS)) flow_loops_dump (dump_file, NULL, 0); scev_initialize (); FOR_EACH_LOOP (loop, 0) { vec<edge> exits; edge ex; unsigned int j; class tree_niter_desc niter_desc; if (loop->header->aux) bb_predicate = *(predicate *) loop->header->aux; else bb_predicate = false; exits = get_loop_exit_edges (loop); FOR_EACH_VEC_ELT (exits, j, ex) if (number_of_iterations_exit (loop, ex, &niter_desc, false) && !is_gimple_min_invariant (niter_desc.niter)) { predicate will_be_nonconstant = will_be_nonconstant_expr_predicate (&fbi, info, params_summary, niter_desc.niter, nonconstant_names); if (will_be_nonconstant != true) will_be_nonconstant = bb_predicate & will_be_nonconstant; if (will_be_nonconstant != true && will_be_nonconstant != false) /* This is slightly inprecise. We may want to represent each loop with independent predicate. */ loop_iterations &= will_be_nonconstant; } exits.release (); } /* To avoid quadratic behavior we analyze stride predicates only with respect to the containing loop. Thus we simply iterate over all defs in the outermost loop body. 
*/ for (loop = loops_for_fn (cfun)->tree_root->inner; loop != NULL; loop = loop->next) { basic_block *body = get_loop_body (loop); for (unsigned i = 0; i < loop->num_nodes; i++) { gimple_stmt_iterator gsi; if (body[i]->aux) bb_predicate = *(predicate *) body[i]->aux; else bb_predicate = false; for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); if (!is_gimple_assign (stmt)) continue; tree def = gimple_assign_lhs (stmt); if (TREE_CODE (def) != SSA_NAME) continue; affine_iv iv; if (!simple_iv (loop_containing_stmt (stmt), loop_containing_stmt (stmt), def, &iv, true) || is_gimple_min_invariant (iv.step)) continue; predicate will_be_nonconstant = will_be_nonconstant_expr_predicate (&fbi, info, params_summary, iv.step, nonconstant_names); if (will_be_nonconstant != true) will_be_nonconstant = bb_predicate & will_be_nonconstant; if (will_be_nonconstant != true && will_be_nonconstant != false) /* This is slightly inprecise. We may want to represent each loop with independent predicate. 
*/ loop_stride = loop_stride & will_be_nonconstant; } } free (body); } ipa_fn_summary *s = ipa_fn_summaries->get (node); set_hint_predicate (&s->loop_iterations, loop_iterations); set_hint_predicate (&s->loop_stride, loop_stride); scev_finalize (); } FOR_ALL_BB_FN (bb, my_function) { edge e; edge_iterator ei; if (bb->aux) edge_predicate_pool.remove ((predicate *)bb->aux); bb->aux = NULL; FOR_EACH_EDGE (e, ei, bb->succs) { if (e->aux) edge_predicate_pool.remove ((predicate *) e->aux); e->aux = NULL; } } ipa_fn_summary *s = ipa_fn_summaries->get (node); ipa_size_summary *ss = ipa_size_summaries->get (node); s->time = time; ss->self_size = size; nonconstant_names.release (); ipa_release_body_info (&fbi); if (opt_for_fn (node->decl, optimize)) { if (!early) loop_optimizer_finalize (); else if (!ipa_edge_args_sum) ipa_free_all_node_params (); free_dominance_info (CDI_DOMINATORS); free_dominance_info (CDI_POST_DOMINATORS); } if (dump_file) { fprintf (dump_file, "\n"); ipa_dump_fn_summary (dump_file, node); } } /* Compute function summary. EARLY is true when we compute parameters during early opts. */ void compute_fn_summary (struct cgraph_node *node, bool early) { HOST_WIDE_INT self_stack_size; struct cgraph_edge *e; gcc_assert (!node->inlined_to); if (!ipa_fn_summaries) ipa_fn_summary_alloc (); /* Create a new ipa_fn_summary. */ ((ipa_fn_summary_t *)ipa_fn_summaries)->remove_callees (node); ipa_fn_summaries->remove (node); class ipa_fn_summary *info = ipa_fn_summaries->get_create (node); class ipa_size_summary *size_info = ipa_size_summaries->get_create (node); /* Estimate the stack size for the function if we're optimizing. */ self_stack_size = optimize && !node->thunk.thunk_p ? 
		    estimated_stack_frame_size (node) : 0;
  size_info->estimated_self_stack_size = self_stack_size;
  info->estimated_stack_size = self_stack_size;

  if (node->thunk.thunk_p)
    {
      /* Thunks get a synthetic summary: a fixed call cost plus an extra
	 charge when the thunk stays uninlined.  */
      ipa_call_summary *es = ipa_call_summaries->get_create (node->callees);
      predicate t = true;

      node->can_change_signature = false;
      es->call_stmt_size = eni_size_weights.call_cost;
      es->call_stmt_time = eni_time_weights.call_cost;
      info->account_size_time (ipa_fn_summary::size_scale
			       * opt_for_fn (node->decl,
					     param_uninlined_function_thunk_insns),
			       opt_for_fn (node->decl,
					   param_uninlined_function_thunk_time),
			       t, t);
      t = predicate::not_inlined ();
      info->account_size_time (2 * ipa_fn_summary::size_scale, 0, t, t);
      ipa_update_overall_fn_summary (node);
      size_info->self_size = size_info->size;
      if (stdarg_p (TREE_TYPE (node->decl)))
	{
	  info->inlinable = false;
	  node->callees->inline_failed = CIF_VARIADIC_THUNK;
	}
      else
	info->inlinable = true;
    }
  else
    {
      /* Even is_gimple_min_invariant rely on current_function_decl.  */
      push_cfun (DECL_STRUCT_FUNCTION (node->decl));

      /* During IPA profile merging we may be called w/o virtual SSA form
	 built.  */
      update_ssa (TODO_update_ssa_only_virtuals);

      /* Can this function be inlined at all?  */
      if (!opt_for_fn (node->decl, optimize)
	  && !lookup_attribute ("always_inline",
				DECL_ATTRIBUTES (node->decl)))
	info->inlinable = false;
      else
	info->inlinable = tree_inlinable_function_p (node->decl);

      /* Type attributes can use parameter indices to describe them.  */
      if (TYPE_ATTRIBUTES (TREE_TYPE (node->decl))
	  /* Likewise for #pragma omp declare simd functions or functions
	     with simd attribute.  */
	  || lookup_attribute ("omp declare simd",
			       DECL_ATTRIBUTES (node->decl)))
	node->can_change_signature = false;
      else
	{
	  /* Otherwise, inlinable functions always can change signature.  */
	  if (info->inlinable)
	    node->can_change_signature = true;
	  else
	    {
	      /* Functions calling builtin_apply cannot change signature.  */
	      for (e = node->callees; e; e = e->next_callee)
		{
		  tree cdecl = e->callee->decl;
		  if (fndecl_built_in_p (cdecl, BUILT_IN_APPLY_ARGS)
		      || fndecl_built_in_p (cdecl, BUILT_IN_VA_START))
		    break;
		}
	      /* E is non-NULL iff the loop above found such a call.  */
	      node->can_change_signature = !e;
	    }
	}
      analyze_function_body (node, early);
      pop_cfun ();
    }

  /* Inlining characteristics are maintained by the cgraph_mark_inline.  */
  size_info->size = size_info->self_size;
  info->estimated_stack_size = size_info->estimated_self_stack_size;

  /* Code above should compute exactly the same result as
     ipa_update_overall_fn_summary except for case when speculative
     edges are present since these are accounted to size but not
     self_size.  Do not compare time since different order the roundoff
     errors result in slight changes.  */
  ipa_update_overall_fn_summary (node);
  if (flag_checking)
    {
      for (e = node->indirect_calls; e; e = e->next_callee)
	if (e->speculative)
	  break;
      gcc_assert (e || size_info->size == size_info->self_size);
    }
}


/* Compute parameters of functions used by inliner using
   current_function_decl.  */

static unsigned int
compute_fn_summary_for_current (void)
{
  compute_fn_summary (cgraph_node::get (current_function_decl), true);
  return 0;
}

/* Estimate benefit devirtualizing indirect edge IE, provided KNOWN_VALS,
   KNOWN_CONTEXTS and KNOWN_AGGS.  */

static bool
estimate_edge_devirt_benefit (struct cgraph_edge *ie,
			      int *size, int *time,
			      vec<tree> known_vals,
			      vec<ipa_polymorphic_call_context> known_contexts,
			      vec<ipa_agg_value_set> known_aggs)
{
  tree target;
  struct cgraph_node *callee;
  class ipa_fn_summary *isummary;
  enum availability avail;
  bool speculative;

  /* Nothing is known about the context; devirtualization impossible.  */
  if (!known_vals.length () && !known_contexts.length ())
    return false;
  if (!opt_for_fn (ie->caller->decl, flag_indirect_inlining))
    return false;

  target = ipa_get_indirect_edge_target (ie, known_vals, known_contexts,
					 known_aggs, &speculative);
  if (!target || speculative)
    return false;

  /* Account for difference in cost between indirect and direct calls.
  */
  *size -= (eni_size_weights.indirect_call_cost - eni_size_weights.call_cost);
  *time -= (eni_time_weights.indirect_call_cost - eni_time_weights.call_cost);
  gcc_checking_assert (*time >= 0);
  gcc_checking_assert (*size >= 0);

  callee = cgraph_node::get (target);
  if (!callee || !callee->definition)
    return false;
  callee = callee->function_symbol (&avail);
  if (avail < AVAIL_AVAILABLE)
    return false;
  isummary = ipa_fn_summaries->get (callee);
  if (isummary == NULL)
    return false;

  return isummary->inlinable;
}

/* Increase SIZE, MIN_SIZE (if non-NULL) and TIME for size and time needed to
   handle edge E with probability PROB.
   Set HINTS if edge may be devirtualized.
   KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS describe context of the call
   site.  */

static inline void
estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
			     sreal *time,
			     vec<tree> known_vals,
			     vec<ipa_polymorphic_call_context> known_contexts,
			     vec<ipa_agg_value_set> known_aggs,
			     ipa_hints *hints)
{
  class ipa_call_summary *es = ipa_call_summaries->get (e);
  int call_size = es->call_stmt_size;
  int call_time = es->call_stmt_time;
  int cur_size;

  /* Only indirect (callee-less), maybe-hot edges are considered for the
     devirtualization hint.  */
  if (!e->callee && hints && e->maybe_hot_p ()
      && estimate_edge_devirt_benefit (e, &call_size, &call_time,
				       known_vals, known_contexts,
				       known_aggs))
    *hints |= INLINE_HINT_indirect_call;
  cur_size = call_size * ipa_fn_summary::size_scale;
  *size += cur_size;
  if (min_size)
    *min_size += cur_size;
  if (time)
    /* Scale the per-call time by the edge execution frequency.  */
    *time += ((sreal)call_time) * e->sreal_frequency ();
}

/* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
   calls in NODE.  POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
   describe context of the call site.

   Helper for estimate_calls_size_and_time which does the same but
   (in most cases) faster.  */

static void
estimate_calls_size_and_time_1 (struct cgraph_node *node, int *size,
				int *min_size, sreal *time,
				ipa_hints *hints,
				clause_t possible_truths,
				vec<tree> known_vals,
				vec<ipa_polymorphic_call_context> known_contexts,
				vec<ipa_agg_value_set> known_aggs)
{
  struct cgraph_edge *e;
  for (e = node->callees; e; e = e->next_callee)
    {
      if (!e->inline_failed)
	{
	  /* Edge already inlined: recurse into the inlined body instead of
	     accounting a call.  */
	  gcc_checking_assert (!ipa_call_summaries->get (e));
	  estimate_calls_size_and_time_1 (e->callee, size, min_size, time,
					  hints,
					  possible_truths,
					  known_vals, known_contexts,
					  known_aggs);
	  continue;
	}
      class ipa_call_summary *es = ipa_call_summaries->get (e);

      /* Do not care about zero sized builtins.  */
      if (!es->call_stmt_size)
	{
	  gcc_checking_assert (!es->call_stmt_time);
	  continue;
	}
      if (!es->predicate
	  || es->predicate->evaluate (possible_truths))
	{
	  /* Predicates of calls shall not use NOT_CHANGED codes,
	     so we do not need to compute probabilities.  */
	  estimate_edge_size_and_time (e, size,
				       es->predicate ? NULL : min_size,
				       time,
				       known_vals, known_contexts,
				       known_aggs, hints);
	}
    }
  for (e = node->indirect_calls; e; e = e->next_callee)
    {
      class ipa_call_summary *es = ipa_call_summaries->get (e);
      if (!es->predicate
	  || es->predicate->evaluate (possible_truths))
	estimate_edge_size_and_time (e, size,
				     es->predicate ? NULL : min_size,
				     time,
				     known_vals, known_contexts, known_aggs,
				     hints);
    }
}

/* Populate sum->call_size_time_table for edges from NODE.
  */

static void
summarize_calls_size_and_time (struct cgraph_node *node,
			       ipa_fn_summary *sum)
{
  struct cgraph_edge *e;
  for (e = node->callees; e; e = e->next_callee)
    {
      if (!e->inline_failed)
	{
	  /* Recurse into already-inlined bodies.  */
	  gcc_checking_assert (!ipa_call_summaries->get (e));
	  summarize_calls_size_and_time (e->callee, sum);
	  continue;
	}
      int size = 0;
      sreal time = 0;

      estimate_edge_size_and_time (e, &size, NULL, &time,
				   vNULL, vNULL, vNULL, NULL);

      struct predicate pred = true;
      class ipa_call_summary *es = ipa_call_summaries->get (e);

      if (es->predicate)
	pred = *es->predicate;
      sum->account_size_time (size, time, pred, pred, true);
    }
  for (e = node->indirect_calls; e; e = e->next_callee)
    {
      int size = 0;
      sreal time = 0;

      estimate_edge_size_and_time (e, &size, NULL, &time,
				   vNULL, vNULL, vNULL, NULL);
      struct predicate pred = true;
      class ipa_call_summary *es = ipa_call_summaries->get (e);

      if (es->predicate)
	pred = *es->predicate;
      sum->account_size_time (size, time, pred, pred, true);
    }
}

/* Increase SIZE, MIN_SIZE and TIME for size and time needed to handle all
   calls in NODE.  POSSIBLE_TRUTHS, KNOWN_VALS, KNOWN_AGGS and KNOWN_CONTEXTS
   describe context of the call site.  */

static void
estimate_calls_size_and_time (struct cgraph_node *node, int *size,
			      int *min_size, sreal *time,
			      ipa_hints *hints,
			      clause_t possible_truths,
			      vec<tree> known_vals,
			      vec<ipa_polymorphic_call_context> known_contexts,
			      vec<ipa_agg_value_set> known_aggs)
{
  class ipa_fn_summary *sum = ipa_fn_summaries->get (node);
  bool use_table = true;

  gcc_assert (node->callees || node->indirect_calls);

  /* During early inlining we do not calculate info for very
     large functions and thus there is no need for producing
     summaries.  */
  if (!ipa_node_params_sum)
    use_table = false;
  /* Do not calculate summaries for simple wrappers; it is waste
     of memory.  */
  else if (node->callees && node->indirect_calls
	   && node->callees->inline_failed && !node->callees->next_callee)
    use_table = false;
  /* If there is an indirect edge that may be optimized, we need
     to go the slow way.  */
  else if ((known_vals.length ()
	    || known_contexts.length ()
	    || known_aggs.length ()) && hints)
    {
      class ipa_node_params *params_summary = IPA_NODE_REF (node);
      unsigned int nargs = params_summary
			   ? ipa_get_param_count (params_summary) : 0;

      for (unsigned int i = 0; i < nargs && use_table; i++)
	{
	  if (ipa_is_param_used_by_indirect_call (params_summary, i)
	      && ((known_vals.length () > i && known_vals[i])
		  || (known_aggs.length () > i
		      && known_aggs[i].items.length ())))
	    use_table = false;
	  else if (ipa_is_param_used_by_polymorphic_call (params_summary, i)
		   && (known_contexts.length () > i
		       && !known_contexts[i].useless_p ()))
	    use_table = false;
	}
    }

  /* Fast path is via the call size time table.  */
  if (use_table)
    {
      /* Build summary if it is absent.  */
      if (!sum->call_size_time_table)
	{
	  predicate true_pred = true;
	  sum->account_size_time (0, 0, true_pred, true_pred, true);
	  summarize_calls_size_and_time (node, sum);
	}

      int old_size = *size;
      sreal old_time = time ? *time : 0;

      if (min_size)
	*min_size += (*sum->call_size_time_table)[0].size;

      unsigned int i;
      size_time_entry *e;

      /* Walk the table and account sizes and times.  */
      for (i = 0; vec_safe_iterate (sum->call_size_time_table, i, &e);
	   i++)
	if (e->exec_predicate.evaluate (possible_truths))
	  {
	    *size += e->size;
	    if (time)
	      *time += e->time;
	  }

      /* Be careful and see if both methods agree.  */
      if ((flag_checking || dump_file)
	  /* Do not try to sanity check when we know we lost some
	     precision.  */
	  && sum->call_size_time_table->length ()
	     < ipa_fn_summary::max_size_time_table_size)
	{
	  estimate_calls_size_and_time_1 (node, &old_size, NULL, &old_time, NULL,
					  possible_truths, known_vals,
					  known_contexts, known_aggs);
	  gcc_assert (*size == old_size);
	  if (time && (*time - old_time > 1 || *time - old_time < -1)
	      && dump_file)
	    fprintf (dump_file, "Time mismatch in call summary %f!=%f\n",
		     old_time.to_double (),
		     time->to_double ());
	}
    }
  /* Slow path by walking all edges.  */
  else
    estimate_calls_size_and_time_1 (node, size, min_size, time, hints,
				    possible_truths, known_vals, known_contexts,
				    known_aggs);
}

/* Default constructor for ipa call context.
   Memory allocation of known_vals, known_contexts
   and known_aggs vectors is owned by the caller, but can
   be release by ipa_call_context::release.

   inline_param_summary is owned by the caller.  */
ipa_call_context::ipa_call_context (cgraph_node *node,
				    clause_t possible_truths,
				    clause_t nonspec_possible_truths,
				    vec<tree> known_vals,
				    vec<ipa_polymorphic_call_context>
					known_contexts,
				    vec<ipa_agg_value_set> known_aggs,
				    vec<inline_param_summary>
					inline_param_summary)
: m_node (node), m_possible_truths (possible_truths),
  m_nonspec_possible_truths (nonspec_possible_truths),
  m_inline_param_summary (inline_param_summary),
  m_known_vals (known_vals),
  m_known_contexts (known_contexts),
  m_known_aggs (known_aggs)
{
}

/* Set THIS to be a duplicate of CTX.  Copy all relevant info.  */

void
ipa_call_context::duplicate_from (const ipa_call_context &ctx)
{
  m_node = ctx.m_node;
  m_possible_truths = ctx.m_possible_truths;
  m_nonspec_possible_truths = ctx.m_nonspec_possible_truths;
  class ipa_node_params *params_summary = IPA_NODE_REF (m_node);
  unsigned int nargs = params_summary
		       ? ipa_get_param_count (params_summary) : 0;

  m_inline_param_summary = vNULL;
  /* Copy the info only if there is at least one useful entry.
*/ if (ctx.m_inline_param_summary.exists ()) { unsigned int n = MIN (ctx.m_inline_param_summary.length (), nargs); for (unsigned int i = 0; i < n; i++) if (ipa_is_param_used_by_ipa_predicates (params_summary, i) && !ctx.m_inline_param_summary[i].useless_p ()) { m_inline_param_summary = ctx.m_inline_param_summary.copy (); break; } } m_known_vals = vNULL; if (ctx.m_known_vals.exists ()) { unsigned int n = MIN (ctx.m_known_vals.length (), nargs); for (unsigned int i = 0; i < n; i++) if (ipa_is_param_used_by_indirect_call (params_summary, i) && ctx.m_known_vals[i]) { m_known_vals = ctx.m_known_vals.copy (); break; } } m_known_contexts = vNULL; if (ctx.m_known_contexts.exists ()) { unsigned int n = MIN (ctx.m_known_contexts.length (), nargs); for (unsigned int i = 0; i < n; i++) if (ipa_is_param_used_by_polymorphic_call (params_summary, i) && !ctx.m_known_contexts[i].useless_p ()) { m_known_contexts = ctx.m_known_contexts.copy (); break; } } m_known_aggs = vNULL; if (ctx.m_known_aggs.exists ()) { unsigned int n = MIN (ctx.m_known_aggs.length (), nargs); for (unsigned int i = 0; i < n; i++) if (ipa_is_param_used_by_indirect_call (params_summary, i) && !ctx.m_known_aggs[i].is_empty ()) { m_known_aggs = ipa_copy_agg_values (ctx.m_known_aggs); break; } } } /* Release memory used by known_vals/contexts/aggs vectors. If ALL is true release also inline_param_summary. This happens when context was previously duplicated to be stored into cache. */ void ipa_call_context::release (bool all) { /* See if context is initialized at first place. */ if (!m_node) return; ipa_release_agg_values (m_known_aggs, all); if (all) { m_known_vals.release (); m_known_contexts.release (); m_inline_param_summary.release (); } } /* Return true if CTX describes the same call context as THIS. 
  */

bool
ipa_call_context::equal_to (const ipa_call_context &ctx)
{
  if (m_node != ctx.m_node
      || m_possible_truths != ctx.m_possible_truths
      || m_nonspec_possible_truths != ctx.m_nonspec_possible_truths)
    return false;

  class ipa_node_params *params_summary = IPA_NODE_REF (m_node);
  unsigned int nargs = params_summary
		       ? ipa_get_param_count (params_summary) : 0;

  /* For each of the four per-parameter vectors the comparison is the
     same: only entries for parameters actually used by the relevant
     kind of predicate/call matter, and a missing entry is equivalent
     to a "useless" one.  */
  if (m_inline_param_summary.exists () || ctx.m_inline_param_summary.exists ())
    {
      for (unsigned int i = 0; i < nargs; i++)
	{
	  if (!ipa_is_param_used_by_ipa_predicates (params_summary, i))
	    continue;
	  if (i >= m_inline_param_summary.length ()
	      || m_inline_param_summary[i].useless_p ())
	    {
	      if (i < ctx.m_inline_param_summary.length ()
		  && !ctx.m_inline_param_summary[i].useless_p ())
		return false;
	      continue;
	    }
	  if (i >= ctx.m_inline_param_summary.length ()
	      || ctx.m_inline_param_summary[i].useless_p ())
	    {
	      if (i < m_inline_param_summary.length ()
		  && !m_inline_param_summary[i].useless_p ())
		return false;
	      continue;
	    }
	  if (!m_inline_param_summary[i].equal_to
	     (ctx.m_inline_param_summary[i]))
	    return false;
	}
    }
  if (m_known_vals.exists () || ctx.m_known_vals.exists ())
    {
      for (unsigned int i = 0; i < nargs; i++)
	{
	  if (!ipa_is_param_used_by_indirect_call (params_summary, i))
	    continue;
	  if (i >= m_known_vals.length () || !m_known_vals[i])
	    {
	      if (i < ctx.m_known_vals.length () && ctx.m_known_vals[i])
		return false;
	      continue;
	    }
	  if (i >= ctx.m_known_vals.length () || !ctx.m_known_vals[i])
	    {
	      if (i < m_known_vals.length () && m_known_vals[i])
		return false;
	      continue;
	    }
	  if (m_known_vals[i] != ctx.m_known_vals[i])
	    return false;
	}
    }
  if (m_known_contexts.exists () || ctx.m_known_contexts.exists ())
    {
      for (unsigned int i = 0; i < nargs; i++)
	{
	  if (!ipa_is_param_used_by_polymorphic_call (params_summary, i))
	    continue;
	  if (i >= m_known_contexts.length ()
	      || m_known_contexts[i].useless_p ())
	    {
	      if (i < ctx.m_known_contexts.length ()
		  && !ctx.m_known_contexts[i].useless_p ())
		return false;
	      continue;
	    }
	  if (i >= ctx.m_known_contexts.length ()
	      || ctx.m_known_contexts[i].useless_p ())
	    {
	      if (i < m_known_contexts.length ()
		  && !m_known_contexts[i].useless_p ())
		return false;
	      continue;
	    }
	  if (!m_known_contexts[i].equal_to
	     (ctx.m_known_contexts[i]))
	    return false;
	}
    }
  if (m_known_aggs.exists () || ctx.m_known_aggs.exists ())
    {
      for (unsigned int i = 0; i < nargs; i++)
	{
	  if (!ipa_is_param_used_by_indirect_call (params_summary, i))
	    continue;
	  if (i >= m_known_aggs.length () || m_known_aggs[i].is_empty ())
	    {
	      if (i < ctx.m_known_aggs.length ()
		  && !ctx.m_known_aggs[i].is_empty ())
		return false;
	      continue;
	    }
	  if (i >= ctx.m_known_aggs.length ()
	      || ctx.m_known_aggs[i].is_empty ())
	    {
	      if (i < m_known_aggs.length ()
		  && !m_known_aggs[i].is_empty ())
		return false;
	      continue;
	    }
	  if (!m_known_aggs[i].equal_to (ctx.m_known_aggs[i]))
	    return false;
	}
    }
  return true;
}

/* Estimate size and time needed to execute call in the given context.
   Additionally determine hints determined by the context.  Finally compute
   minimal size needed for the call that is independent on the call context and
   can be used for fast estimates.  Return the values in RET_SIZE,
   RET_MIN_SIZE, RET_TIME and RET_HINTS.  */

void
ipa_call_context::estimate_size_and_time (int *ret_size,
					  int *ret_min_size,
					  sreal *ret_time,
					  sreal *ret_nonspecialized_time,
					  ipa_hints *ret_hints)
{
  class ipa_fn_summary *info = ipa_fn_summaries->get (m_node);
  size_time_entry *e;
  int size = 0;
  sreal time = 0;
  int min_size = 0;
  ipa_hints hints = 0;
  int i;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      bool found = false;
      fprintf (dump_file, "   Estimating body: %s\n"
	       "   Known to be false: ", m_node->dump_name ());

      for (i = predicate::not_inlined_condition;
	   i < (predicate::first_dynamic_condition
		+ (int) vec_safe_length (info->conds)); i++)
	if (!(m_possible_truths & (1 << i)))
	  {
	    if (found)
	      fprintf (dump_file, ", ");
	    found = true;
	    dump_condition (dump_file, info->conds, i);
	  }
    }

  if (m_node->callees || m_node->indirect_calls)
    estimate_calls_size_and_time (m_node, &size, &min_size,
				  ret_time ?
				  &time : NULL,
				  ret_hints ? &hints : NULL, m_possible_truths,
				  m_known_vals, m_known_contexts, m_known_aggs);

  sreal nonspecialized_time = time;

  min_size += (*info->size_time_table)[0].size;
  for (i = 0; vec_safe_iterate (info->size_time_table, i, &e); i++)
    {
      bool exec = e->exec_predicate.evaluate (m_nonspec_possible_truths);

      /* Because predicates are conservative, it can happen that nonconst is 1
	 but exec is 0.  */
      if (exec)
        {
          bool nonconst = e->nonconst_predicate.evaluate (m_possible_truths);

	  gcc_checking_assert (e->time >= 0);
	  gcc_checking_assert (time >= 0);

	  /* We compute specialized size only because size of nonspecialized
	     copy is context independent.

	     The difference between nonspecialized execution and specialized is
	     that nonspecialized is not going to have optimized out computations
	     known to be constant in a specialized setting.  */
	  if (nonconst)
	    size += e->size;
	  if (!ret_time)
	    continue;
	  nonspecialized_time += e->time;
	  if (!nonconst)
	    ;
	  else if (!m_inline_param_summary.exists ())
	    {
	      if (nonconst)
	        time += e->time;
	    }
	  else
	    {
	      /* Scale by the probability that the predicate is
		 actually nonconstant given the parameter-change
		 probabilities of the call site.  */
	      int prob = e->nonconst_predicate.probability
					       (info->conds,
					        m_possible_truths,
					        m_inline_param_summary);
	      gcc_checking_assert (prob >= 0);
	      gcc_checking_assert (prob <= REG_BR_PROB_BASE);
	      if (prob == REG_BR_PROB_BASE)
	        time += e->time;
	      else
	        time += e->time * prob / REG_BR_PROB_BASE;
	    }
	  gcc_checking_assert (time >= 0);
        }
     }
  gcc_checking_assert ((*info->size_time_table)[0].exec_predicate == true);
  gcc_checking_assert ((*info->size_time_table)[0].nonconst_predicate == true);
  gcc_checking_assert (min_size >= 0);
  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);
  /* nonspecialized_time should be always bigger than specialized time.
     Roundoff issues however may get into the way.  */
  gcc_checking_assert ((nonspecialized_time - time * 99 / 100) >= -1);

  /* Roundoff issues may make specialized time bigger than nonspecialized
     time.  We do not really want that to happen because some heuristics
     may get confused by seeing negative speedups.  */
  if (time > nonspecialized_time)
    time = nonspecialized_time;

  if (ret_hints)
    {
      if (info->loop_iterations
	  && !info->loop_iterations->evaluate (m_possible_truths))
	hints |= INLINE_HINT_loop_iterations;
      if (info->loop_stride
	  && !info->loop_stride->evaluate (m_possible_truths))
	hints |= INLINE_HINT_loop_stride;
      if (info->scc_no)
	hints |= INLINE_HINT_in_scc;
      if (DECL_DECLARED_INLINE_P (m_node->decl))
	hints |= INLINE_HINT_declared_inline;
    }
  /* Sizes are accumulated in the scaled fixed-point representation;
     convert back to instruction counts.  */
  size = RDIV (size, ipa_fn_summary::size_scale);
  min_size = RDIV (min_size, ipa_fn_summary::size_scale);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n   size:%i time:%f nonspec time:%f\n", (int) size,
	     time.to_double (), nonspecialized_time.to_double ());
  if (ret_time)
    *ret_time = time;
  if (ret_nonspecialized_time)
    *ret_nonspecialized_time = nonspecialized_time;
  if (ret_size)
    *ret_size = size;
  if (ret_min_size)
    *ret_min_size = min_size;
  if (ret_hints)
    *ret_hints = hints;
  return;
}

/* Estimate size and time needed to execute callee of EDGE assuming that
   parameters known to be constant at caller of EDGE are propagated.
   KNOWN_VALS and KNOWN_CONTEXTS are vectors of assumed known constant values
   and types for parameters.  */

void
estimate_ipcp_clone_size_and_time (struct cgraph_node *node,
				   vec<tree> known_vals,
				   vec<ipa_polymorphic_call_context>
				   known_contexts,
				   vec<ipa_agg_value_set> known_aggs,
				   int *ret_size, sreal *ret_time,
				   sreal *ret_nonspec_time,
				   ipa_hints *hints)
{
  clause_t clause, nonspec_clause;

  /* TODO: Also pass known value ranges.  */
  evaluate_conditions_for_known_args (node, false, known_vals, vNULL,
				      known_aggs, &clause, &nonspec_clause);
  ipa_call_context ctx (node, clause, nonspec_clause,
		        known_vals, known_contexts,
		        known_aggs, vNULL);
  ctx.estimate_size_and_time (ret_size, NULL, ret_time,
			      ret_nonspec_time, hints);
}

/* Return stack frame offset where frame of NODE is supposed to start inside
   of the function it is inlined to.
   Return 0 for functions that are not inlined.
  */

HOST_WIDE_INT
ipa_get_stack_frame_offset (struct cgraph_node *node)
{
  HOST_WIDE_INT offset = 0;
  if (!node->inlined_to)
    return 0;
  node = node->callers->caller;
  /* Walk up the chain of inline callers, accumulating each one's own
     stack-frame size, until the outermost (non-inlined) function.  */
  while (true)
    {
      offset += ipa_size_summaries->get (node)->estimated_self_stack_size;
      if (!node->inlined_to)
	return offset;
      node = node->callers->caller;
    }
}


/* Update summary information of inline clones after inlining.
   Compute peak stack usage.  */

static void
inline_update_callee_summaries (struct cgraph_node *node, int depth)
{
  struct cgraph_edge *e;

  ipa_propagate_frequency (node);
  for (e = node->callees; e; e = e->next_callee)
    {
      if (!e->inline_failed)
	inline_update_callee_summaries (e->callee, depth);
      else
	ipa_call_summaries->get (e)->loop_depth += depth;
    }
  for (e = node->indirect_calls; e; e = e->next_callee)
    ipa_call_summaries->get (e)->loop_depth += depth;
}

/* Update change_prob of EDGE after INLINED_EDGE has been inlined.
   When function A is inlined in B and A calls C with parameter that
   changes with probability PROB1 and C is known to be passthrough
   of argument if B that change with probability PROB2, the probability
   of change is now PROB1*PROB2.  */

static void
remap_edge_change_prob (struct cgraph_edge *inlined_edge,
			struct cgraph_edge *edge)
{
  if (ipa_node_params_sum)
    {
      int i;
      class ipa_edge_args *args = IPA_EDGE_REF (edge);
      if (!args)
	return;
      class ipa_call_summary *es = ipa_call_summaries->get (edge);
      class ipa_call_summary *inlined_es
	= ipa_call_summaries->get (inlined_edge);

      if (es->param.length () == 0)
	return;

      for (i = 0; i < ipa_get_cs_argument_count (args); i++)
	{
	  struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
	  if (jfunc->type == IPA_JF_PASS_THROUGH
	      || jfunc->type == IPA_JF_ANCESTOR)
	    {
	      int id = jfunc->type == IPA_JF_PASS_THROUGH
		       ? ipa_get_jf_pass_through_formal_id (jfunc)
		       : ipa_get_jf_ancestor_formal_id (jfunc);
	      if (id < (int) inlined_es->param.length ())
		{
		  int prob1 = es->param[i].change_prob;
		  int prob2 = inlined_es->param[id].change_prob;
		  int prob = combine_probabilities (prob1, prob2);

		  /* Keep a nonzero probability alive after rounding so
		     the parameter is not treated as never-changing.  */
		  if (prob1 && prob2 && !prob)
		    prob = 1;

		  es->param[i].change_prob = prob;
		}
	    }
	}
    }
}

/* Update edge summaries of NODE after INLINED_EDGE has been inlined.

   Remap predicates of callees of NODE.  Rest of arguments match
   remap_predicate.

   Also update change probabilities.  */

static void
remap_edge_summaries (struct cgraph_edge *inlined_edge,
		      struct cgraph_node *node,
		      class ipa_fn_summary *info,
		      class ipa_node_params *params_summary,
		      class ipa_fn_summary *callee_info,
		      vec<int> operand_map,
		      vec<int> offset_map,
		      clause_t possible_truths,
		      predicate *toplev_predicate)
{
  struct cgraph_edge *e, *next;
  for (e = node->callees; e; e = next)
    {
      predicate p;
      next = e->next_callee;

      if (e->inline_failed)
	{
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  remap_edge_change_prob (inlined_edge, e);

	  if (es->predicate)
	    {
	      p = es->predicate->remap_after_inlining
				     (info, params_summary,
				      callee_info, operand_map,
				      offset_map, possible_truths,
				      *toplev_predicate);
	      edge_set_predicate (e, &p);
	    }
	  else
	    edge_set_predicate (e, toplev_predicate);
	}
      else
	/* Already-inlined callee: recurse to remap its edges too.  */
	remap_edge_summaries (inlined_edge, e->callee, info,
			      params_summary, callee_info,
			      operand_map, offset_map, possible_truths,
			      toplev_predicate);
    }
  for (e = node->indirect_calls; e; e = next)
    {
      class ipa_call_summary *es = ipa_call_summaries->get (e);
      predicate p;
      next = e->next_callee;

      remap_edge_change_prob (inlined_edge, e);
      if (es->predicate)
	{
	  p = es->predicate->remap_after_inlining
				 (info, params_summary,
				  callee_info, operand_map, offset_map,
				  possible_truths, *toplev_predicate);
	  edge_set_predicate (e, &p);
	}
      else
	edge_set_predicate (e, toplev_predicate);
    }
}

/* Same as remap_predicate, but set result into hint *HINT.
  */

static void
remap_hint_predicate (class ipa_fn_summary *info,
		      class ipa_node_params *params_summary,
		      class ipa_fn_summary *callee_info,
		      predicate **hint,
		      vec<int> operand_map,
		      vec<int> offset_map,
		      clause_t possible_truths,
		      predicate *toplev_predicate)
{
  predicate p;

  if (!*hint)
    return;
  p = (*hint)->remap_after_inlining
		   (info, params_summary, callee_info,
		    operand_map, offset_map,
		    possible_truths, *toplev_predicate);
  if (p != false && p != true)
    {
      if (!*hint)
	set_hint_predicate (hint, p);
      else
	**hint &= p;
    }
}

/* We inlined EDGE.  Update summary of the function we inlined into.  */

void
ipa_merge_fn_summary_after_inlining (struct cgraph_edge *edge)
{
  ipa_fn_summary *callee_info = ipa_fn_summaries->get (edge->callee);
  struct cgraph_node *to = (edge->caller->inlined_to
			    ? edge->caller->inlined_to : edge->caller);
  class ipa_fn_summary *info = ipa_fn_summaries->get (to);
  clause_t clause = 0;	/* not_inline is known to be false.  */
  size_time_entry *e;
  auto_vec<int, 8> operand_map;
  auto_vec<int, 8> offset_map;
  int i;
  predicate toplev_predicate;
  class ipa_call_summary *es = ipa_call_summaries->get (edge);
  class ipa_node_params *params_summary = (ipa_node_params_sum
					   ? IPA_NODE_REF (to) : NULL);

  if (es->predicate)
    toplev_predicate = *es->predicate;
  else
    toplev_predicate = true;

  info->fp_expressions |= callee_info->fp_expressions;

  if (callee_info->conds)
    {
      /* Evaluated only for CLAUSE; the vectors themselves are unused.  */
      auto_vec<tree, 32> known_vals;
      auto_vec<ipa_agg_value_set, 32> known_aggs;
      evaluate_properties_for_edge (edge, true, &clause, NULL,
				    &known_vals, NULL, &known_aggs);
    }
  if (ipa_node_params_sum && callee_info->conds)
    {
      class ipa_edge_args *args = IPA_EDGE_REF (edge);
      int count = args ? ipa_get_cs_argument_count (args) : 0;
      int i;

      if (count)
	{
	  operand_map.safe_grow_cleared (count);
	  offset_map.safe_grow_cleared (count);
	}
      for (i = 0; i < count; i++)
	{
	  struct ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
	  int map = -1;

	  /* TODO: handle non-NOPs when merging.  */
	  if (jfunc->type == IPA_JF_PASS_THROUGH)
	    {
	      if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
		map = ipa_get_jf_pass_through_formal_id (jfunc);
	      if (!ipa_get_jf_pass_through_agg_preserved (jfunc))
		offset_map[i] = -1;
	    }
	  else if (jfunc->type == IPA_JF_ANCESTOR)
	    {
	      HOST_WIDE_INT offset = ipa_get_jf_ancestor_offset (jfunc);
	      if (offset >= 0 && offset < INT_MAX)
		{
		  map = ipa_get_jf_ancestor_formal_id (jfunc);
		  if (!ipa_get_jf_ancestor_agg_preserved (jfunc))
		    offset = -1;
		  offset_map[i] = offset;
		}
	    }
	  operand_map[i] = map;
	  gcc_assert (map < ipa_get_param_count (params_summary));
	}
    }
  sreal freq = edge->sreal_frequency ();
  for (i = 0; vec_safe_iterate (callee_info->size_time_table, i, &e); i++)
    {
      predicate p;
      p = e->exec_predicate.remap_after_inlining
			     (info, params_summary,
			      callee_info, operand_map,
			      offset_map, clause,
			      toplev_predicate);
      predicate nonconstp;
      nonconstp = e->nonconst_predicate.remap_after_inlining
				     (info, params_summary,
				      callee_info, operand_map,
				      offset_map, clause,
				      toplev_predicate);
      if (p != false && nonconstp != false)
	{
	  sreal add_time = ((sreal)e->time * freq);
	  int prob = e->nonconst_predicate.probability (callee_info->conds,
							clause, es->param);
	  if (prob != REG_BR_PROB_BASE)
	    add_time = add_time * prob / REG_BR_PROB_BASE;
	  if (prob != REG_BR_PROB_BASE
	      && dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t\tScaling time by probability:%f\n",
		       (double) prob / REG_BR_PROB_BASE);
	    }
	  info->account_size_time (e->size, add_time, p, nonconstp);
	}
    }
  remap_edge_summaries (edge, edge->callee, info, params_summary,
		 	callee_info, operand_map,
			offset_map, clause, &toplev_predicate);
  remap_hint_predicate (info, params_summary, callee_info,
			&callee_info->loop_iterations,
			operand_map, offset_map, clause, &toplev_predicate);
  remap_hint_predicate (info, params_summary, callee_info,
			&callee_info->loop_stride,
			operand_map, offset_map, clause, &toplev_predicate);

  /* Peak stack usage is the maximum over all inline-chain frames.  */
  HOST_WIDE_INT stack_frame_offset = ipa_get_stack_frame_offset (edge->callee);
  HOST_WIDE_INT peak = stack_frame_offset + callee_info->estimated_stack_size;

  if (info->estimated_stack_size < peak)
    info->estimated_stack_size = peak;

  inline_update_callee_summaries (edge->callee, es->loop_depth);

  /* Keep the cached call_size_time_table consistent: the call just
     inlined is no longer a call, while the callee's calls are now ours.  */
  if (info->call_size_time_table)
    {
      int edge_size = 0;
      sreal edge_time = 0;

      estimate_edge_size_and_time (edge, &edge_size, NULL, &edge_time, vNULL,
				   vNULL, vNULL, 0);
      /* Unaccount size and time of the optimized out call.  */
      info->account_size_time (-edge_size, -edge_time,
	 		       es->predicate ? *es->predicate : true,
	 		       es->predicate ? *es->predicate : true,
			       true);
      /* Account new calls.  */
      summarize_calls_size_and_time (edge->callee, info);
    }

  /* Free summaries that are not maintained for inline clones/edges.  */
  ipa_call_summaries->remove (edge);
  ipa_fn_summaries->remove (edge->callee);
  ipa_remove_from_growth_caches (edge);
}

/* For performance reasons ipa_merge_fn_summary_after_inlining is not updating
   overall size and time.  Recompute it.
   If RESET is true also recompute call_time_size_table.  */

void
ipa_update_overall_fn_summary (struct cgraph_node *node, bool reset)
{
  class ipa_fn_summary *info = ipa_fn_summaries->get (node);
  class ipa_size_summary *size_info = ipa_size_summaries->get (node);
  size_time_entry *e;
  int i;

  size_info->size = 0;
  info->time = 0;
  for (i = 0; vec_safe_iterate (info->size_time_table, i, &e); i++)
    {
      size_info->size += e->size;
      info->time += e->time;
    }
  info->min_size = (*info->size_time_table)[0].size;
  if (reset)
    vec_free (info->call_size_time_table);
  if (node->callees || node->indirect_calls)
    estimate_calls_size_and_time (node, &size_info->size, &info->min_size,
				  &info->time, NULL,
				  ~(clause_t) (1 << predicate::false_condition),
				  vNULL, vNULL, vNULL);
  size_info->size = RDIV (size_info->size, ipa_fn_summary::size_scale);
  info->min_size = RDIV (info->min_size, ipa_fn_summary::size_scale);
}


/* This function performs intraprocedural analysis in NODE that is required to
   inline indirect calls.
  */

static void
inline_indirect_intraprocedural_analysis (struct cgraph_node *node)
{
  ipa_analyze_node (node);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      ipa_print_node_params (dump_file, node);
      ipa_print_node_jump_functions (dump_file, node);
    }
}


/* Note function body size.  */

void
inline_analyze_function (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));

  if (dump_file)
    fprintf (dump_file, "\nAnalyzing function: %s\n", node->dump_name ());
  if (opt_for_fn (node->decl, optimize) && !node->thunk.thunk_p)
    inline_indirect_intraprocedural_analysis (node);
  compute_fn_summary (node, false);
  if (!optimize)
    {
      /* Without optimization nothing will be inlined; mark every edge
	 accordingly so the inliner does not try.  */
      struct cgraph_edge *e;
      for (e = node->callees; e; e = e->next_callee)
	e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
      for (e = node->indirect_calls; e; e = e->next_callee)
	e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
    }

  pop_cfun ();
}


/* Called when new function is inserted to callgraph late.  */

void
ipa_fn_summary_t::insert (struct cgraph_node *node, ipa_fn_summary *)
{
  inline_analyze_function (node);
}

/* Note function body size.  */

static void
ipa_fn_summary_generate (void)
{
  struct cgraph_node *node;

  FOR_EACH_DEFINED_FUNCTION (node)
    if (DECL_STRUCT_FUNCTION (node->decl))
      node->versionable = tree_versionable_function_p (node->decl);

  ipa_fn_summary_alloc ();

  ipa_fn_summaries->enable_insertion_hook ();

  ipa_register_cgraph_hooks ();

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->alias
	&& (flag_generate_lto || flag_generate_offload|| flag_wpa
	    || opt_for_fn (node->decl, optimize)))
      inline_analyze_function (node);
}


/* Write inline summary for edge E to OB.  */

static void
read_ipa_call_summary (class lto_input_block *ib, struct cgraph_edge *e,
		       bool prevails)
{
  /* When the symbol does not prevail we still must consume the stream,
     so every read below happens unconditionally while stores into ES
     are guarded.  */
  class ipa_call_summary *es = prevails
				? ipa_call_summaries->get_create (e) : NULL;
  predicate p;
  int length, i;

  int size = streamer_read_uhwi (ib);
  int time = streamer_read_uhwi (ib);
  int depth = streamer_read_uhwi (ib);

  if (es)
    {
      es->call_stmt_size = size;
      es->call_stmt_time = time;
      es->loop_depth = depth;
    }

  bitpack_d bp = streamer_read_bitpack (ib);
  if (es)
    es->is_return_callee_uncaptured = bp_unpack_value (&bp, 1);
  else
    bp_unpack_value (&bp, 1);

  p.stream_in (ib);
  if (es)
    edge_set_predicate (e, &p);
  length = streamer_read_uhwi (ib);
  if (length && es && e->possibly_call_in_translation_unit_p ())
    {
      es->param.safe_grow_cleared (length);
      for (i = 0; i < length; i++)
	es->param[i].change_prob = streamer_read_uhwi (ib);
    }
  else
    {
      for (i = 0; i < length; i++)
	streamer_read_uhwi (ib);
    }
}


/* Stream in inline summaries from the section.  */

static void
inline_read_section (struct lto_file_decl_data *file_data, const char *data,
		     size_t len)
{
  const struct lto_function_header *header =
    (const struct lto_function_header *) data;
  const int cfg_offset = sizeof (struct lto_function_header);
  const int main_offset = cfg_offset + header->cfg_size;
  const int string_offset = main_offset + header->main_size;
  class data_in *data_in;
  unsigned int i, count2, j;
  unsigned int f_count;

  lto_input_block ib ((const char *) data + main_offset, header->main_size,
		      file_data->mode_table);

  data_in =
    lto_data_in_create (file_data, (const char *) data + string_offset,
			header->string_size, vNULL);
  f_count = streamer_read_uhwi (&ib);
  for (i = 0; i < f_count; i++)
    {
      unsigned int index;
      struct cgraph_node *node;
      class ipa_fn_summary *info;
      class ipa_node_params *params_summary;
      class ipa_size_summary *size_info;
      lto_symtab_encoder_t encoder;
      struct bitpack_d bp;
      struct cgraph_edge *e;
      predicate p;

      index = streamer_read_uhwi (&ib);
      encoder = file_data->symtab_node_encoder;
      node = dyn_cast<cgraph_node *> (lto_symtab_encoder_deref (encoder,
								index));
      info = node->prevailing_p () ?
ipa_fn_summaries->get_create (node) : NULL; params_summary = node->prevailing_p () ? IPA_NODE_REF (node) : NULL; size_info = node->prevailing_p () ? ipa_size_summaries->get_create (node) : NULL; int stack_size = streamer_read_uhwi (&ib); int size = streamer_read_uhwi (&ib); sreal time = sreal::stream_in (&ib); if (info) { info->estimated_stack_size = size_info->estimated_self_stack_size = stack_size; size_info->size = size_info->self_size = size; info->time = time; } bp = streamer_read_bitpack (&ib); if (info) { info->inlinable = bp_unpack_value (&bp, 1); info->fp_expressions = bp_unpack_value (&bp, 1); } else { bp_unpack_value (&bp, 1); bp_unpack_value (&bp, 1); } count2 = streamer_read_uhwi (&ib); gcc_assert (!info || !info->conds); if (info) vec_safe_reserve_exact (info->conds, count2); for (j = 0; j < count2; j++) { struct condition c; unsigned int k, count3; c.operand_num = streamer_read_uhwi (&ib); c.code = (enum tree_code) streamer_read_uhwi (&ib); c.type = stream_read_tree (&ib, data_in); c.val = stream_read_tree (&ib, data_in); bp = streamer_read_bitpack (&ib); c.agg_contents = bp_unpack_value (&bp, 1); c.by_ref = bp_unpack_value (&bp, 1); if (c.agg_contents) c.offset = streamer_read_uhwi (&ib); count3 = streamer_read_uhwi (&ib); c.param_ops = NULL; if (info) vec_safe_reserve_exact (c.param_ops, count3); if (params_summary) ipa_set_param_used_by_ipa_predicates (params_summary, c.operand_num, true); for (k = 0; k < count3; k++) { struct expr_eval_op op; enum gimple_rhs_class rhs_class; op.code = (enum tree_code) streamer_read_uhwi (&ib); op.type = stream_read_tree (&ib, data_in); switch (rhs_class = get_gimple_rhs_class (op.code)) { case GIMPLE_UNARY_RHS: op.index = 0; op.val[0] = NULL_TREE; op.val[1] = NULL_TREE; break; case GIMPLE_BINARY_RHS: case GIMPLE_TERNARY_RHS: bp = streamer_read_bitpack (&ib); op.index = bp_unpack_value (&bp, 2); op.val[0] = stream_read_tree (&ib, data_in); if (rhs_class == GIMPLE_BINARY_RHS) op.val[1] = NULL_TREE; else op.val[1] = 
stream_read_tree (&ib, data_in); break; default: fatal_error (UNKNOWN_LOCATION, "invalid fnsummary in LTO stream"); } if (info) c.param_ops->quick_push (op); } if (info) info->conds->quick_push (c); } count2 = streamer_read_uhwi (&ib); gcc_assert (!info || !info->size_time_table); if (info && count2) vec_safe_reserve_exact (info->size_time_table, count2); for (j = 0; j < count2; j++) { class size_time_entry e; e.size = streamer_read_uhwi (&ib); e.time = sreal::stream_in (&ib); e.exec_predicate.stream_in (&ib); e.nonconst_predicate.stream_in (&ib); if (info) info->size_time_table->quick_push (e); } p.stream_in (&ib); if (info) set_hint_predicate (&info->loop_iterations, p); p.stream_in (&ib); if (info) set_hint_predicate (&info->loop_stride, p); for (e = node->callees; e; e = e->next_callee) read_ipa_call_summary (&ib, e, info != NULL); for (e = node->indirect_calls; e; e = e->next_callee) read_ipa_call_summary (&ib, e, info != NULL); } lto_free_section_data (file_data, LTO_section_ipa_fn_summary, NULL, data, len); lto_data_in_delete (data_in); } /* Read inline summary. Jump functions are shared among ipa-cp and inliner, so when ipa-cp is active, we don't need to write them twice. */ static void ipa_fn_summary_read (void) { struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data (); struct lto_file_decl_data *file_data; unsigned int j = 0; ipa_prop_read_jump_functions (); ipa_fn_summary_alloc (); while ((file_data = file_data_vec[j++])) { size_t len; const char *data = lto_get_summary_section_data (file_data, LTO_section_ipa_fn_summary, &len); if (data) inline_read_section (file_data, data, len); else /* Fatal error here. We do not want to support compiling ltrans units with different version of compiler or different flags than the WPA unit, so this should never happen. 
*/ fatal_error (input_location, "ipa inline summary is missing in input file"); } ipa_register_cgraph_hooks (); gcc_assert (ipa_fn_summaries); ipa_fn_summaries->enable_insertion_hook (); } /* Write inline summary for edge E to OB. */ static void write_ipa_call_summary (struct output_block *ob, struct cgraph_edge *e) { class ipa_call_summary *es = ipa_call_summaries->get (e); int i; streamer_write_uhwi (ob, es->call_stmt_size); streamer_write_uhwi (ob, es->call_stmt_time); streamer_write_uhwi (ob, es->loop_depth); bitpack_d bp = bitpack_create (ob->main_stream); bp_pack_value (&bp, es->is_return_callee_uncaptured, 1); streamer_write_bitpack (&bp); if (es->predicate) es->predicate->stream_out (ob); else streamer_write_uhwi (ob, 0); streamer_write_uhwi (ob, es->param.length ()); for (i = 0; i < (int) es->param.length (); i++) streamer_write_uhwi (ob, es->param[i].change_prob); } /* Write inline summary for node in SET. Jump functions are shared among ipa-cp and inliner, so when ipa-cp is active, we don't need to write them twice. 
*/ static void ipa_fn_summary_write (void) { struct output_block *ob = create_output_block (LTO_section_ipa_fn_summary); lto_symtab_encoder_iterator lsei; lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder; unsigned int count = 0; for (lsei = lsei_start_function_in_partition (encoder); !lsei_end_p (lsei); lsei_next_function_in_partition (&lsei)) { cgraph_node *cnode = lsei_cgraph_node (lsei); if (cnode->definition && !cnode->alias) count++; } streamer_write_uhwi (ob, count); for (lsei = lsei_start_function_in_partition (encoder); !lsei_end_p (lsei); lsei_next_function_in_partition (&lsei)) { cgraph_node *cnode = lsei_cgraph_node (lsei); if (cnode->definition && !cnode->alias) { class ipa_fn_summary *info = ipa_fn_summaries->get (cnode); class ipa_size_summary *size_info = ipa_size_summaries->get (cnode); struct bitpack_d bp; struct cgraph_edge *edge; int i; size_time_entry *e; struct condition *c; streamer_write_uhwi (ob, lto_symtab_encoder_encode (encoder, cnode)); streamer_write_hwi (ob, size_info->estimated_self_stack_size); streamer_write_hwi (ob, size_info->self_size); info->time.stream_out (ob); bp = bitpack_create (ob->main_stream); bp_pack_value (&bp, info->inlinable, 1); bp_pack_value (&bp, false, 1); bp_pack_value (&bp, info->fp_expressions, 1); streamer_write_bitpack (&bp); streamer_write_uhwi (ob, vec_safe_length (info->conds)); for (i = 0; vec_safe_iterate (info->conds, i, &c); i++) { int j; struct expr_eval_op *op; streamer_write_uhwi (ob, c->operand_num); streamer_write_uhwi (ob, c->code); stream_write_tree (ob, c->type, true); stream_write_tree (ob, c->val, true); bp = bitpack_create (ob->main_stream); bp_pack_value (&bp, c->agg_contents, 1); bp_pack_value (&bp, c->by_ref, 1); streamer_write_bitpack (&bp); if (c->agg_contents) streamer_write_uhwi (ob, c->offset); streamer_write_uhwi (ob, vec_safe_length (c->param_ops)); for (j = 0; vec_safe_iterate (c->param_ops, j, &op); j++) { streamer_write_uhwi (ob, op->code); stream_write_tree 
(ob, op->type, true); if (op->val[0]) { bp = bitpack_create (ob->main_stream); bp_pack_value (&bp, op->index, 2); streamer_write_bitpack (&bp); stream_write_tree (ob, op->val[0], true); if (op->val[1]) stream_write_tree (ob, op->val[1], true); } } } streamer_write_uhwi (ob, vec_safe_length (info->size_time_table)); for (i = 0; vec_safe_iterate (info->size_time_table, i, &e); i++) { streamer_write_uhwi (ob, e->size); e->time.stream_out (ob); e->exec_predicate.stream_out (ob); e->nonconst_predicate.stream_out (ob); } if (info->loop_iterations) info->loop_iterations->stream_out (ob); else streamer_write_uhwi (ob, 0); if (info->loop_stride) info->loop_stride->stream_out (ob); else streamer_write_uhwi (ob, 0); for (edge = cnode->callees; edge; edge = edge->next_callee) write_ipa_call_summary (ob, edge); for (edge = cnode->indirect_calls; edge; edge = edge->next_callee) write_ipa_call_summary (ob, edge); } } streamer_write_char_stream (ob->main_stream, 0); produce_asm (ob, NULL); destroy_output_block (ob); ipa_prop_write_jump_functions (); } /* Release function summary. */ void ipa_free_fn_summary (void) { if (!ipa_call_summaries) return; ggc_delete (ipa_fn_summaries); ipa_fn_summaries = NULL; delete ipa_call_summaries; ipa_call_summaries = NULL; edge_predicate_pool.release (); /* During IPA this is one of largest datastructures to release. */ if (flag_wpa) ggc_trim (); } /* Release function summary. 
*/ void ipa_free_size_summary (void) { if (!ipa_size_summaries) return; delete ipa_size_summaries; ipa_size_summaries = NULL; } namespace { const pass_data pass_data_local_fn_summary = { GIMPLE_PASS, /* type */ "local-fnsummary", /* name */ OPTGROUP_INLINE, /* optinfo_flags */ TV_INLINE_PARAMETERS, /* tv_id */ 0, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ 0, /* todo_flags_finish */ }; class pass_local_fn_summary : public gimple_opt_pass { public: pass_local_fn_summary (gcc::context *ctxt) : gimple_opt_pass (pass_data_local_fn_summary, ctxt) {} /* opt_pass methods: */ opt_pass * clone () { return new pass_local_fn_summary (m_ctxt); } virtual unsigned int execute (function *) { return compute_fn_summary_for_current (); } }; // class pass_local_fn_summary } // anon namespace gimple_opt_pass * make_pass_local_fn_summary (gcc::context *ctxt) { return new pass_local_fn_summary (ctxt); } /* Free inline summary. */ namespace { const pass_data pass_data_ipa_free_fn_summary = { SIMPLE_IPA_PASS, /* type */ "free-fnsummary", /* name */ OPTGROUP_NONE, /* optinfo_flags */ TV_IPA_FREE_INLINE_SUMMARY, /* tv_id */ 0, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ 0, /* todo_flags_finish */ }; class pass_ipa_free_fn_summary : public simple_ipa_opt_pass { public: pass_ipa_free_fn_summary (gcc::context *ctxt) : simple_ipa_opt_pass (pass_data_ipa_free_fn_summary, ctxt), small_p (false) {} /* opt_pass methods: */ opt_pass *clone () { return new pass_ipa_free_fn_summary (m_ctxt); } void set_pass_param (unsigned int n, bool param) { gcc_assert (n == 0); small_p = param; } virtual bool gate (function *) { return true; } virtual unsigned int execute (function *) { ipa_free_fn_summary (); if (!flag_wpa) ipa_free_size_summary (); return 0; } private: bool small_p; }; // class pass_ipa_free_fn_summary } // anon namespace simple_ipa_opt_pass * 
make_pass_ipa_free_fn_summary (gcc::context *ctxt) { return new pass_ipa_free_fn_summary (ctxt); } namespace { const pass_data pass_data_ipa_fn_summary = { IPA_PASS, /* type */ "fnsummary", /* name */ OPTGROUP_INLINE, /* optinfo_flags */ TV_IPA_FNSUMMARY, /* tv_id */ 0, /* properties_required */ 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ ( TODO_dump_symtab ), /* todo_flags_finish */ }; class pass_ipa_fn_summary : public ipa_opt_pass_d { public: pass_ipa_fn_summary (gcc::context *ctxt) : ipa_opt_pass_d (pass_data_ipa_fn_summary, ctxt, ipa_fn_summary_generate, /* generate_summary */ ipa_fn_summary_write, /* write_summary */ ipa_fn_summary_read, /* read_summary */ NULL, /* write_optimization_summary */ NULL, /* read_optimization_summary */ NULL, /* stmt_fixup */ 0, /* function_transform_todo_flags_start */ NULL, /* function_transform */ NULL) /* variable_transform */ {} /* opt_pass methods: */ virtual unsigned int execute (function *) { return 0; } }; // class pass_ipa_fn_summary } // anon namespace ipa_opt_pass_d * make_pass_ipa_fn_summary (gcc::context *ctxt) { return new pass_ipa_fn_summary (ctxt); } /* Reset all state within ipa-fnsummary.c so that we can rerun the compiler within the same process. For use by toplev::finalize. */ void ipa_fnsummary_c_finalize (void) { ipa_free_fn_summary (); }
Load.c
/***************************************************************************** ! * ! * Elmer, A Finite Element Software for Multiphysical Problems ! * ! * Copyright 1st April 1995 - , CSC - IT Center for Science Ltd., Finland ! * ! * This library is free software; you can redistribute it and/or ! * modify it under the terms of the GNU Lesser General Public ! * License as published by the Free Software Foundation; either ! * version 2.1 of the License, or (at your option) any later version. ! * ! * This library is distributed in the hope that it will be useful, ! * but WITHOUT ANY WARRANTY; without even the implied warranty of ! * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ! * Lesser General Public License for more details. ! * ! * You should have received a copy of the GNU Lesser General Public ! * License along with this library (in file ../LGPL-2.1); if not, write ! * to the Free Software Foundation, Inc., 51 Franklin Street, ! * Fifth Floor, Boston, MA 02110-1301 USA ! * ! ****************************************************************************** ! * ! * Utilities for dynamic loading of user functions, and other operating ! * system interfaces. ! * ! ****************************************************************************** ! * ! * Authors: Juha Ruokolainen ! * Email: Juha.Ruokolainen@csc.fi ! * Web: http://www.csc.fi/elmer ! * Address: CSC - IT Center for Science Ltd. ! * Keilaranta 14 ! * 02101 Espoo, Finland ! * ! * Original Date: 02 Jun 1997 ! * ! *****************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdint.h> /* #include <elmer/matc.h> maybe in the future */ /* eg. 
FC_CHAR_PTR and FC_FUNC is defined here */ #include "../config.h" #if defined(WIN32) | defined(MINGW32) # include <direct.h> # include <windows.h> #define ELMER_PATH_SEPARATOR ";" #else #include <strings.h> # include <dlfcn.h> # include <sys/stat.h> #define ELMER_PATH_SEPARATOR ":" #endif #define MAX_PATH_LEN 512 #define ERROR_BUF_LEN 10*MAX_PATH_LEN #ifndef USE_ISO_C_BINDINGS #ifdef SGI64 void corename_() { #include <sys/types.h> #include <sys/resource.h> #include <sys/prctl.h> prctl( PR_COREPID,0,0 ); } #endif #endif /* pc needs more bits on 64bit arch */ #ifdef ARCH_32_BITS #define f_ptr int32_t * #else #define f_ptr int64_t * #endif /*#if defined(MINGW32)*/ /*-------------------------------------------------------------------------- work around mingw rxvt shell stdio/err buffering troubles -------------------------------------------------------------------------*/ /*void STDCALLBULL FC_FUNC_(set_stdio_bufs,SET_STDIO_BUFS) ()*/ /*[>void set_stdio_bufs_()<]*/ /*{*/ /*setvbuf( stdout, NULL, _IOLBF, 2048 );*/ /*setvbuf( stderr, NULL, _IONBF, 2048 );*/ /*}*/ /*#endif*/ /*-------------------------------------------------------------------------- This routine will return the home directory of elmer solver. 
-------------------------------------------------------------------------*/ #ifdef USE_ISO_C_BINDINGS void STDCALLBULL getsolverhome( char *solverDir, int *len) #else void STDCALLBULL FC_FUNC(getsolverhome,GETSOLVERHOME) ( char *solverDir, int *len) #endif { *len = 0; char *elmer_home = getenv("ELMER_HOME"); if(elmer_home != NULL) { /* Return solver home relative to ELMER_HOME*/ #if defined(WIN32) || defined(MINGW32) _snprintf(solverDir, MAX_PATH_LEN, "%s\\share\\elmersolver", elmer_home); #else snprintf(solverDir, MAX_PATH_LEN, "%s/share/elmersolver", elmer_home); #endif *len = strlen(elmer_home) + 18; if(*len > MAX_PATH_LEN) *len = MAX_PATH_LEN; return; } #if defined(WIN32) || defined(MINGW32) static char appPath[MAX_PATH_LEN] = ""; static char appDir[MAX_PATH_LEN] = ""; char *exeName = NULL; int n = 0; /* Get the full module file name */ GetModuleFileName(NULL, appPath, MAX_PATH_LEN); if(appPath == NULL) return; exeName = strrchr(appPath, '\\'); if(exeName == NULL) return; n = (int)(exeName - appPath); if(n < 0) return; /* play safe */ if(n > MAX_PATH_LEN) n = MAX_PATH_LEN; /* This is where the executable resides */ strncpy(appDir, appPath, n); /* Return solver home relative to appDir */ _snprintf(solverDir, MAX_PATH_LEN, "%s\\..\\share\\elmersolver", appDir); *len = n + 21; if(*len > MAX_PATH_LEN) *len = MAX_PATH_LEN; #else /* Use the directory defined in config.h */ snprintf(solverDir, MAX_PATH_LEN, "%s", ELMER_SOLVER_HOME); *len = strlen(ELMER_SOLVER_HOME); if(*len > MAX_PATH_LEN) *len = MAX_PATH_LEN; #endif } /*-------------------------------------------------------------------------- This routine will create a directory given name of the directory. 
-------------------------------------------------------------------------*/ #ifdef USE_ISO_C_BINDINGS void STDCALLBULL makedirectory(char *Name) #else void STDCALLBULL FC_FUNC(makedirectory,MAKEDIRECTORY) (char *Name) #endif { #if defined(WIN32) || defined(MINGW32) if ( _mkdir( Name ) != 0 ) { #else if ( mkdir( Name, 0700 ) != 0 ) { chmod( Name, 0700 ); #endif } } #ifndef USE_ISO_C_BINDINGS /*-------------------------------------------------------------------------- This routine execute a operating system command. -------------------------------------------------------------------------*/ void STDCALLBULL FC_FUNC(systemc,SYSTEMC) ( char *str ) { system( str ); } /*-------------------------------------------------------------------------- This routine will return value of a environment variable to a given string variable. -------------------------------------------------------------------------*/ void STDCALLBULL FC_FUNC(envir,ENVIR) (char *Name, char *Value, int *len) { if ( getenv( Name ) ) { strncpy( Value,(char *)getenv(Name), MAX_PATH_LEN ); *len = strlen( Value ); } else { *len = 0; *Value = '\0'; } } #endif /*-------------------------------------------------------------------------- Internal: convert function names into to fortran mangled form for dynamical loading ---------------------------------------------------------------------------*/ static void STDCALLBULL fortranMangle(char *orig, char *mangled) { int uscore, i; strcpy( mangled, orig ); if(ELMER_LINKTYP == 1 || ELMER_LINKTYP == 3 || ELMER_LINKTYP == 4) { for( i=0 ; i<strlen(mangled) ; i++ ) /* to lower case */ { if ( mangled[i] >= 'A' && mangled[i] <= 'Z' ) mangled[i] += 'a' - 'A'; } } if(ELMER_LINKTYP == 2) { for( i=0; i<strlen(mangled); i++ ) /* to upper case */ { if ( mangled[i] >= 'a' && mangled[i] <= 'z' ) mangled[i] += 'A' - 'a'; } } if(ELMER_LINKTYP == 1) /* underscore */ { strcat( mangled, "_" ); } else if(ELMER_LINKTYP == 4) /* 1-2 underscores */ { uscore = 0; for( i=0; i<strlen(mangled); 
i++ ) if(mangled[i] == '_') uscore++; if(uscore == 0) { strcat( mangled, "_" ); } else { strcat( mangled, "__" ); } } } /*-------------------------------------------------------------------------- INTERNAL: Appends two paths with slash checking Args: path1, path2 - string to join -------------------------------------------------------------------------*/ static void STDCALLBULL append_path(char *path1, char *path2) { size_t len1; len1 = strnlen(path1, 2*MAX_PATH_LEN); #if defined(WIN32) || defined(MINGW) if (path1[len1-1] != '\\') { strncat(path1, "\\", 2*MAX_PATH_LEN); } #else if (path1[len1-1] != '/') { strncat(path1, "/", 2*MAX_PATH_LEN); } #endif strncat(path1, path2, 2*MAX_PATH_LEN); } /*-------------------------------------------------------------------------- INTERNAL: Tries to open library with dlopen, first without any extensions and then with SHL_EXTENSION. Args: Libname - name of the library file Handle - handle to the dl, NULL if fails error_buf - string buffer for error messages -------------------------------------------------------------------------*/ static void STDCALLBULL try_dlopen(char *LibName, void **Handle, char *errorBuf) { static char dl_names[2][2*MAX_PATH_LEN]; char error_tmp[MAX_PATH_LEN]; int i; strncpy(dl_names[0], LibName, 2*MAX_PATH_LEN); strncpy(dl_names[1], LibName, 2*MAX_PATH_LEN); strncat(dl_names[1], SHL_EXTENSION, MAX_PATH_LEN); for (i = 0; i < 2; i++) { #ifdef HAVE_DLOPEN_API if ((*Handle = dlopen(dl_names[i], RTLD_NOW)) == NULL) { strncat(errorBuf, dlerror(), MAX_PATH_LEN); strncat(errorBuf, "\n", MAX_PATH_LEN); } else { break; } #elif defined(HAVE_LOADLIBRARY_API) if ((*Handle = LoadLibrary(dl_names[i])) == NULL) { sprintf(error_tmp, "Can not find %s.\n", dl_names[i]); strncat(errorBuf, error_tmp, ERROR_BUF_LEN); } else { break; } #endif } } /*-------------------------------------------------------------------------- INTERNAL: Parses the search path and tries to open a solver. First search is done without any path prefixes. 
   Args: SearchPath - colon separated list of search paths
         Library    - name of the library file to be opened
         Handle     - handle to the dl file, NULL if fails
         error_buf  - string buffer for error messages
   NOTE: SearchPath is tokenized in place with strtok, so the caller's
   string is modified and the routine is not reentrant.
   --------------------------------------------------------------------------*/
static void STDCALLBULL try_open_solver(char *SearchPath, char *Library,
                                        void **Handle, char *errorBuf)
{
  static char CurrentLib[2*MAX_PATH_LEN];
  char *tok;

  /* Try to open first without any prefixes */
  try_dlopen(Library, Handle, errorBuf);

  /* and then using the provided paths */
  if (*Handle == NULL) {
    tok = strtok(SearchPath, ELMER_PATH_SEPARATOR);
    while (tok != NULL) {
      /* Build "<prefix>/<Library>" and retry until one candidate opens. */
      strncpy(CurrentLib, tok, 2*MAX_PATH_LEN);
      append_path(CurrentLib, Library);
      try_dlopen(CurrentLib, Handle, errorBuf);
      if (*Handle != NULL)
        break;
      tok = strtok(NULL, ELMER_PATH_SEPARATOR);
    }
  }
}

/*--------------------------------------------------------------------------
   This routine will return address of a function given path to a dynamically
   loaded library and name of the routine.
 -------------------------------------------------------------------------*/
#ifdef USE_ISO_C_BINDINGS
void *STDCALLBULL loadfunction_c( int *Quiet, int *abort_not_found,
                                  char *Library, char *Name )
#else
void *STDCALLBULL FC_FUNC(loadfunction,LOADFUNCTION) ( int *Quiet,
                          int *abort_not_found, char *Library, char *Name )
#endif
{
/*--------------------------------------------------------------------------*/
   /* Look up routine Name (Fortran-mangled) in shared library Library,
      searching ".", $ELMER_LIB, $ELMER_HOME and $ELMER_MODULES_PATH.
      NOTE(review): on failure this calls exit(0), i.e. reports *success*
      to the shell; EXIT_FAILURE would be more conventional.  Also, if
      neither HAVE_DLOPEN_API nor HAVE_LOADLIBRARY_API is defined, both
      Handle and Function are used uninitialized below.  */
   void (*Function)(),*Handle;
   char *cptr;
   static char ElmerLib[2*MAX_PATH_LEN], NewLibName[3*MAX_PATH_LEN],
               NewName[MAX_PATH_LEN], ErrorBuffer[ERROR_BUF_LEN];
/*--------------------------------------------------------------------------*/
   static char appPath[MAX_PATH_LEN] = "";
   char *exeName = NULL;
   int n = 0;
/*--------------------------------------------------------------------------*/
   memset(appPath, 0, MAX_PATH_LEN);
   memset(ElmerLib, 0, 2*MAX_PATH_LEN);
   memset(NewLibName, 0, 3*MAX_PATH_LEN);
   memset(NewName, 0, MAX_PATH_LEN);
   memset(ErrorBuffer, 0, ERROR_BUF_LEN);
/*--------------------------------------------------------------------------*/
   fortranMangle( Name, NewName );
   strncpy( NewLibName, Library, 3*MAX_PATH_LEN );

   if ( *Quiet==0 ) {
     fprintf(stdout,"Loading user function library: [%s]...[%s]\n",
             Library, Name );
     fflush(stdout);
   }

   /* First path is always current directory (.) */
   strncpy(ElmerLib, ".", 2*MAX_PATH_LEN);

   /* Build the search path list: $ELMER_LIB, or $ELMER_HOME-derived,
      or the compile-time default; plus $ELMER_MODULES_PATH. */
   cptr = (char *)getenv( "ELMER_LIB" );
   if ( cptr != NULL ) {
      strncat( ElmerLib, ELMER_PATH_SEPARATOR, 2*MAX_PATH_LEN );
      strncat( ElmerLib, cptr, 2*MAX_PATH_LEN );
   } else {
      cptr = (char *)getenv("ELMER_HOME");
      if ( cptr != NULL ) {
         strncat( ElmerLib, ELMER_PATH_SEPARATOR, 2*MAX_PATH_LEN);
         strncat( ElmerLib, cptr, 2*MAX_PATH_LEN );
         strncat( ElmerLib, "/share/elmersolver/lib", 2*MAX_PATH_LEN );
      } else {
#if defined(WIN32) || defined(MINGW32)
        /* Should not get here unless WIN32 implements DLOPEN_API */
        GetModuleFileName(NULL, appPath, MAX_PATH_LEN);
        exeName = strrchr(appPath, '\\');
        n = (int)(exeName - appPath);
        if(n < 0) n = 0;
        if(n > MAX_PATH_LEN) n = MAX_PATH_LEN;
        strncat(ElmerLib, ELMER_PATH_SEPARATOR, 2*MAX_PATH_LEN);
        strncat(ElmerLib, appPath, n);
        strncat(ElmerLib, "\\..\\share\\elmersolver\\lib", 2*MAX_PATH_LEN);
#else
        strncat( ElmerLib, ELMER_PATH_SEPARATOR, 2*MAX_PATH_LEN );
        strncat( ElmerLib, ELMER_SOLVER_HOME, 2*MAX_PATH_LEN );
        strncat( ElmerLib, "/lib", 2*MAX_PATH_LEN );
#endif
      }
   }

   cptr = (char *)getenv( "ELMER_MODULES_PATH" );
   if ( cptr != NULL ) {
      strncat( ElmerLib, ELMER_PATH_SEPARATOR, 2*MAX_PATH_LEN);
      strncat( ElmerLib, cptr, 2*MAX_PATH_LEN);
   }

   try_open_solver(ElmerLib, Library, &Handle, ErrorBuffer);

   if ( Handle == NULL ) {
      fprintf(stderr, "%s", ErrorBuffer);
      exit(0);
   }

#ifdef HAVE_DLOPEN_API
   if ( (Function = (void(*)())dlsym( Handle,NewName)) == NULL
        && *abort_not_found )
   {
      fprintf( stderr, "Load: FATAL: Can't find procedure [%s]\n", NewName );
      exit(0);
   }
#elif defined(HAVE_LOADLIBRARY_API)
   if ( (Function = (void *)GetProcAddress(Handle,NewName)) == NULL
        && *abort_not_found )
   {
      fprintf( stderr,"Load: FATAL: Can't find procedure [%s]\n", NewName );
      exit(0);
   }
#endif

   return (void *)Function;
}

/*--------------------------------------------------------------------------
   INTERNAL: Execute given function returning integer value
   -------------------------------------------------------------------------*/
static int IntExec( int (STDCALLBULL *Function)(),void *Model )
{
   return (*Function)( Model );
}

/*--------------------------------------------------------------------------
   Execute given function returning integer value
   -------------------------------------------------------------------------*/
#ifdef USE_ISO_C_BINDINGS
int STDCALLBULL execintfunction_c( f_ptr Function,void *Model )
#else
int STDCALLBULL FC_FUNC(execintfunction,EXECINTFUNCTION) ( f_ptr Function,void *Model )
#endif
{
  /* Function arrives from Fortran as an integer holding the address;
     dereference and cast back to a callable pointer. */
  return IntExec( (int (STDCALLBULL *)())*Function,Model );
}

/*--------------------------------------------------------------------------
   INTERNAL: Execute given function filling a double array
   -------------------------------------------------------------------------*/
static void DoubleArrayExec( double *(STDCALLBULL *Function)(), void *Model,
                             int *Node, double *Value, double *Array )
{
   (*Function)( Model,Node,Value,Array );
}

/*--------------------------------------------------------------------------
   Execute given function filling a double array
   -------------------------------------------------------------------------*/
#ifdef USE_ISO_C_BINDINGS
void STDCALLBULL execrealarrayfunction_c( f_ptr Function, void *Model,
                                          int *Node, double *Value, double *Array )
#else
void STDCALLBULL FC_FUNC(execrealarrayfunction,EXECREALARRAYFUNCTION)
     ( f_ptr Function, void *Model, int *Node, double *Value, double *Array )
#endif
{
   DoubleArrayExec( (double*(STDCALLBULL *)())*Function,Model,Node,Value,
                    Array );
}

/*--------------------------------------------------------------------------
   INTERNAL: Execute given function returning double value
   -------------------------------------------------------------------------*/
static double DoubleExec( double (STDCALLBULL *Function)(), void *Model,
                          int *Node, double *Value )
{
   return (*Function)( Model,Node,Value );
}

/*--------------------------------------------------------------------------
   Execute given function returning double value
   -------------------------------------------------------------------------*/
#ifdef USE_ISO_C_BINDINGS
double STDCALLBULL execrealfunction_c( f_ptr Function, void *Model,
                                       int *Node, double *Value )
#else
double STDCALLBULL FC_FUNC(execrealfunction,EXECREALFUNCTION)
     ( f_ptr Function, void *Model, int *Node, double *Value )
#endif
{
   return DoubleExec( (double (STDCALLBULL *)())*Function,Model,Node,Value );
}

/*--------------------------------------------------------------------------
   INTERNAL: Execute given coordinate function returning double value
   -------------------------------------------------------------------------*/
static double ConstDoubleExec( double (STDCALLBULL *Function)(), void *Model,
                               double *x, double *y, double *z )
{
   return (*Function)( Model, x,y,z );
}

/*--------------------------------------------------------------------------
   Execute given function returning double value
   -------------------------------------------------------------------------*/
#ifdef USE_ISO_C_BINDINGS
double STDCALLBULL execconstrealfunction_c( f_ptr Function, void *Model,
                                            double *x, double *y, double *z )
#else
double STDCALLBULL FC_FUNC(execconstrealfunction,EXECCONSTREALFUNCTION)
     ( f_ptr Function, void *Model, double *x, double *y, double *z )
#endif
{
   return ConstDoubleExec( (double (STDCALLBULL *)())*Function,Model,x,y,z );
}

/*--------------------------------------------------------------------------
   Return argument (just to fool Fortran type checking)
   -------------------------------------------------------------------------*/
#ifdef USE_ISO_C_BINDINGS
void *STDCALLBULL addrfunc_c( void *Function )
#else
void *STDCALLBULL FC_FUNC(addrfunc,ADDRFUNC) ( void *Function )
#endif
{
   return (void *)Function;
}

/*--------------------------------------------------------------------------
   INTERNAL: Call solver routines at given address
   -------------------------------------------------------------------------*/
static void DoExecSolver( void (STDCALLBULL *SolverProc)(), void *Model,
                          void *Solver, void *dt, void *Transient)
{
  (*SolverProc)( Model,Solver,dt,Transient );
  return;
}

/*--------------------------------------------------------------------------
   Call solver routines at given address
   -------------------------------------------------------------------------*/
#ifdef USE_ISO_C_BINDINGS
void STDCALLBULL execsolver_c( f_ptr *SolverProc, void *Model, void *Solver,
                               void *dt, void *Transient )
#else
void STDCALLBULL FC_FUNC(execsolver,EXECSOLVER)
     ( f_ptr *SolverProc, void *Model, void *Solver, void *dt, void *Transient )
#endif
{
  DoExecSolver( (void (STDCALLBULL *)())*SolverProc,Model,Solver,dt,Transient );
}

/*--------------------------------------------------------------------------
   INTERNAL: Call lin. solve routines at given address
   -------------------------------------------------------------------------*/
static int DoLinSolveProcs( int (STDCALLBULL *SolverProc)(), void *Model,
                            void *Solver, void *Matrix, void *b, void *x,
                            void *n, void *DOFs, void *Norm )
{
   return (*SolverProc)( Model,Solver,Matrix,b,x,n, DOFs,Norm );
}

/*--------------------------------------------------------------------------
   Call lin. solver routines at given address
   -------------------------------------------------------------------------*/
#ifdef USE_ISO_C_BINDINGS
int STDCALLBULL execlinsolveprocs_c( f_ptr *SolverProc, void *Model,
                                     void *Solver, void *Matrix, void *b,
                                     void *x, void *n, void *DOFs, void *Norm )
#else
int STDCALLBULL FC_FUNC(execlinsolveprocs,EXECLINSOLVEPROCS)
     ( f_ptr *SolverProc, void *Model, void *Solver, void *Matrix, void *b,
       void *x, void *n, void *DOFs, void *Norm )
#endif
{
   return DoLinSolveProcs( (int (STDCALLBULL *)())*SolverProc,Model,Solver,Matrix,b,x,n,DOFs,Norm );
}

/* Provided by the MATC library. */
char *mtc_domath(char *);
void mtc_init(FILE *,FILE *, FILE *);

/*--------------------------------------------------------------------------
   This routine will call matc and return matc variable array values
   -------------------------------------------------------------------------*/
#ifdef USE_ISO_C_BINDINGS
void STDCALLBULL matc_get_array(char *name, double *values,
                                int *nrows, int *ncols )
#else
void STDCALLBULL FC_FUNC_(matc_get_array,MATC_GET_ARRAY) (char *name,
                          double *values, int *nrows, int *ncols )
#endif
{
  var_copy_transpose(name,values,*nrows,*ncols);
}

/*--------------------------------------------------------------------------
   This routine will call matc and return matc result
   -------------------------------------------------------------------------*/
#ifdef USE_ISO_C_BINDINGS
void STDCALLBULL matc( char *cmd, char *Value, int *len )
#else
void STDCALLBULL FC_FUNC(matc,MATC) ( char *cmd, char *Value, int *len )
#endif
{
#define MAXLEN 8192
  /* Evaluate the MATC expression in cmd (length *len, not NUL-terminated on
     entry) and copy the textual result to Value, updating *len.  A "nc:"
     prefix suppresses the fatal-error path for failed evaluations. */
  static int been_here = 0;
  char *ptr, c, cc[32];
  int slen, start;
#pragma omp threadprivate(been_here)

  /* MB: Critical section removed since Matc library
   * modified to be thread safe */
  slen = *len;
  if ( been_here==0 ) {
    /* One-time MATC initialization and output format selection. */
    mtc_init( NULL, stdout, stderr );
    strcpy( cc, "format( 12,\"rowform\")" );
    mtc_domath( cc );
    been_here = 1;
  }

  /* Temporarily NUL-terminate cmd in place; restored before returning. */
  c = cmd[slen];
  cmd[slen] = '\0';

  start = 0;
  if (strncmp(cmd,"nc:",3)==0) start=3;

  ptr = (char *)mtc_domath(&cmd[start]);
  if ( ptr )
  {
    strcpy( Value, (char *)ptr );
    *len = strlen(Value)-1; /* ignore linefeed! */

    if ( strncmp(Value, "MATC ERROR:",11)==0 || strncmp(Value,"WARNING:",8)==0 )
    {
      if (start==0) {
        /* NOTE(review): exit(0) reports success to the shell on a fatal
           input error — EXIT_FAILURE seems intended.  */
        fprintf( stderr, "Solver input file error: %s\n", Value );
        fprintf( stderr, "...offending input line: %s\n", cmd );
        exit(0);
      } else {
        Value[0]=' ';
        *len = 0;
      }
    }
  } else {
    *len = 0;
    *Value = ' ';
  }
  cmd[slen]=c;
}

/*--------------------------------------------------------------------------
   INTERNAL: execute user material function
   -------------------------------------------------------------------------*/
static double DoViscFunction(double (STDCALLBULL *SolverProc)(), void *Model,
                             void *Element, void *Nodes, void *n,
                             void *Basis, void *GradBasis, void *Viscosity,
                             void *Velo, void *GradV )
{
   double s;
   s = (*SolverProc)( Model,Element,Nodes,n,Basis,GradBasis,
                      Viscosity, Velo, GradV );
   return s;
}

/*--------------------------------------------------------------------------
   This routine will call user defined material def. function
   -------------------------------------------------------------------------*/
#ifdef USE_ISO_C_BINDINGS
double STDCALLBULL materialuserfunction_c( f_ptr Function, void *Model,
       void *Element, void *Nodes, void *n, void *nd, void *Basis,
       void *GradBasis, void *Viscosity, void *Velo, void *gradV )
#else
double STDCALLBULL FC_FUNC(materialuserfunction,MATERIALUSERFUNCTION)
     ( f_ptr Function, void *Model, void *Element, void *Nodes, void *n,
       void *nd, void *Basis, void *GradBasis, void *Viscosity, void *Velo,
       void *gradV )
#endif
{
  /* NOTE(review): parameter nd is accepted but not forwarded to the user
     function — confirm against the Fortran caller's interface. */
  return DoViscFunction( (double (STDCALLBULL *)())*Function,Model,Element,Nodes,n,Basis,
                         GradBasis,Viscosity,Velo,gradV );
}

/*--------------------------------------------------------------------------
   INTERNAL: execute user simulation procedure
   -------------------------------------------------------------------------*/
static void DoSimulationProc( void (STDCALLBULL *SimulationProc)(), void *Model )
{
   (*SimulationProc)( Model );
}

/*--------------------------------------------------------------------------
   This routine will call a user defined simulation procedure
   -------------------------------------------------------------------------*/
#ifdef USE_ISO_C_BINDINGS
void STDCALLBULL execsimulationproc_c( f_ptr Function, void *Model )
#else
void STDCALLBULL FC_FUNC(execsimulationproc,EXECSIMULATIONPROC)
     ( f_ptr Function, void *Model )
#endif
{
   DoSimulationProc( (void (STDCALLBULL *)())*Function,Model );
}

/*--------------------------------------------------------------------------
   INTERNAL: execute (Krylov) iterator
   -------------------------------------------------------------------------*/
static void DoIterCall( void (STDCALLBULL *iterProc)(),
                        void *x,void *b,void *ipar,void *dpar,void *work,
                        void (STDCALLBULL *mvProc)(),
                        void (STDCALLBULL *pcondProc)(),
                        void (STDCALLBULL *pcondrProc)(),
                        void (STDCALLBULL *dotProc)(),
                        void (STDCALLBULL *normProc)(),
                        void (STDCALLBULL *STOPC)() )
{
   (*iterProc)( x,b,ipar,dpar,work,mvProc,pcondProc,
                pcondrProc,dotProc,normProc,STOPC );
}

/*--------------------------------------------------------------------------
   This routine will call (Krylov) iterator
   -------------------------------------------------------------------------*/
#ifdef USE_ISO_C_BINDINGS
void STDCALLBULL itercall_c( f_ptr iterProc, void *x, void *b, void *ipar,
                             void *dpar, void *work, f_ptr mvProc,
                             f_ptr pcondProc, f_ptr pcondrProc,
                             f_ptr dotProc, f_ptr normProc, f_ptr STOPC )
#else
void STDCALLBULL FC_FUNC(itercall,ITERCALL)
     ( f_ptr iterProc, void *x, void *b, void *ipar, void *dpar, void *work,
       f_ptr mvProc, f_ptr pcondProc, f_ptr pcondrProc,
       f_ptr dotProc, f_ptr normProc, f_ptr STOPC )
#endif
{
   DoIterCall( (void (STDCALLBULL *)())*iterProc,x,b,ipar,dpar,work,
               (void (STDCALLBULL *)())*mvProc,
               (void (STDCALLBULL *)())*pcondProc,
               (void (STDCALLBULL *)())*pcondrProc,
               (void (STDCALLBULL *)())*dotProc,
               (void (STDCALLBULL *)())*normProc,
               (void (STDCALLBULL *)())*STOPC );
}

/*--------------------------------------------------------------------------
   INTERNAL: execute localmatrix call
-------------------------------------------------------------------------*/ static void DoLocalCall( void (STDCALLBULL *localProc)(), void *Model,void *Solver,void *G, void *F, void *Element,void *n,void *nd ) { (*localProc)( Model, Solver, G, F, Element, n, nd ); } /*-------------------------------------------------------------------------- This routine will call local matrix add-on -------------------------------------------------------------------------*/ #ifdef USE_ISO_C_BINDINGS void STDCALLBULL execlocalproc_c( f_ptr localProc, void *Model,void *Solver, void *G, void *F, void *Element,void *n,void *nd ) #else void STDCALLBULL FC_FUNC(execlocalproc, EXECLOCALPROC ) ( f_ptr localProc, void *Model,void *Solver,void *G, void *F, void *Element,void *n,void *nd ) #endif { DoLocalCall( (void (STDCALLBULL *)())*localProc,Model,Solver,G,F,Element,n,nd ); } /*-------------------------------------------------------------------------- INTERNAL: execute complete localmatrix call -------------------------------------------------------------------------*/ static void DoLocalAssembly( void (STDCALLBULL *LocalAssembly)(), void *Model,void *Solver,void *dt,void *transient,void *M, void *D, void *S,void *F, void *Element,void *n,void *nd ) { (*LocalAssembly)( Model, Solver, dt, transient, M, D, S, F, Element, n, nd ); } /*-------------------------------------------------------------------------- This routine will call complete local matrix add-on -------------------------------------------------------------------------*/ #ifdef USE_ISO_C_BINDINGS void STDCALLBULL execlocalassembly_c( f_ptr LocalAssembly, void *Model, void *Solver,void *dt,void *transient, void *M, void *D, void *S,void *F, void *Element,void *n,void *nd ) #else void STDCALLBULL FC_FUNC(execlocalassembly, EXECLOCALASSEMBLY ) ( f_ptr LocalAssembly, void *Model,void *Solver,void *dt,void *transient,void *M, void *D, void *S,void *F,void *Element,void *n,void *nd ) #endif { DoLocalAssembly( (void (STDCALLBULL 
*)())*LocalAssembly,Model,Solver,dt,transient,M,D,S,F,Element,n,nd ); } /*-------------------------------------------------------------------------- INTERNAL: execute complete localmatrix call -------------------------------------------------------------------------*/ static void DoMatVecSubr( void (STDCALLBULL *matvec)(), void **SpMV,void *n,void *rows,void *cols,void *vals,void *u, void *v, void *reinit ) { (*matvec)( SpMV,n,rows,cols,vals,u,v,reinit); } /*-------------------------------------------------------------------------- This routine will call complete local matrix add-on -------------------------------------------------------------------------*/ #ifdef USE_ISO_C_BINDINGS void STDCALLBULL matvecsubrext_c( f_ptr matvec, void **SpMV, void *n, void *rows, void *cols, void *vals, void *u, void *v,void *reinit ) #else void STDCALLBULL FC_FUNC(matvecsubr, MMATVECSUBR) ( f_ptr matvec, void **SpMV, void *n, void *rows, void *cols, void *vals, void *u, void *v,void *reinit ) #endif { DoMatVecSubr( (void (STDCALLBULL *)())*matvec,SpMV,n,rows,cols,vals,u,v,reinit); }
convolution_1x1_pack8to16.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 1x1 stride-1 convolution, pack8 input -> pack16 output.
// With a 1x1 kernel every output pixel depends on exactly one input pixel,
// so the w x h plane can be viewed as a single row of w*h pixels and fed
// straight into the packed im2col/SGEMM kernel -- no data movement needed.
static void conv1x1s1_sgemm_pack8to16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    const int size = w * h; // total spatial positions

    // Reinterpret as a 1 x (w*h) "image"; shares storage with bottom_blob.
    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = size;
    bottom_im2col.h = 1;

    im2col_sgemm_pack8to16_avx512(bottom_im2col, top_blob, kernel, _bias, opt);
}

// 1x1 stride-2 convolution, pack8 input -> pack16 output.
// Gathers every second pixel of every second row into a dense buffer, then
// reuses the stride-1 path above on the shrunken input.
static void conv1x1s2_sgemm_pack8to16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // Floats to skip after each output row: the unread tail of the current
    // input row, (w - 2*outw), plus the entire next (skipped) row, w --
    // times 8 floats per pack8 pixel.
    const int tailstep = (w - 2 * outw + w) * 8;

    // NOTE(review): create() result is not checked here -- presumably the
    // caller guarantees allocation succeeds; confirm against ncnn convention.
    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const float* r0 = bottom_blob.channel(p);
        float* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                // Copy one pack8 pixel (8 floats), then skip the next
                // pixel: advance 2 pixels x 8 floats = 16 floats (stride 2).
                __m256 _v = _mm256_load_ps(r0);
                _mm256_store_ps(outptr, _v);

                r0 += 16;
                outptr += 8;
            }

            r0 += tailstep;
        }
    }

    conv1x1s1_sgemm_pack8to16_avx512(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
GB_unop__ainv_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__ainv_fp64_fp64 // op(A') function: GB_unop_tran__ainv_fp64_fp64 // C type: double // A type: double // cast: double cij = aij // unaryop: cij = -aij #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = -z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__ainv_fp64_fp64 ( double *Cx, // Cx and Ax may be aliased const double *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = aij ; Cx [p] = -z ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__ainv_fp64_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
update_W_b.c
#include "q_incs.h"
#include "dnn_types.h"
#include "update_W_b.h"

/* One gradient-descent step on all layer weights and biases.
 *
 * For each layer l = 1 .. nl-1:
 *   W[l][j'][j] -= alpha * dW[l][j'][j]   (skipped when d[l][j'] is true)
 *   b[l][j]     -= alpha * db[l][j]
 *
 * W, dW  : weights / weight gradients, indexed [layer][src][dst]
 * b, db  : biases / bias gradients, indexed [layer][dst]
 * nl     : number of layers
 * npl    : neurons per layer
 * d      : true => dropout; false => keep
 * alpha  : learning rate
 *
 * Returns 0 on success.
 */
int update_W_b(
    float ***W,
    float ***dW,
    float **b,
    float **db,
    int nl,
    int *npl,
    bool **d,
    float alpha
    )
{
  int status = 0;

  /* Layer 0 is the input layer: it has no incoming weights or biases. */
  for ( int layer = 1; layer < nl; layer++ ) {
    float **layer_W  = W[layer];
    float **layer_dW = dW[layer];
    float *layer_b   = b[layer];
    float *layer_db  = db[layer];
    bool  *dropped   = d[layer];
    const int n_src  = npl[layer-1];
    const int n_dst  = npl[layer];

    /* Rows of the weight matrix are independent: update them in parallel. */
#pragma omp parallel for
    for ( int src = 0; src < n_src; src++ ) {
      if ( dropped[src] ) { continue; } /* dropout: leave this row untouched */
      float *w_row  = layer_W[src];
      float *dw_row = layer_dW[src];
#pragma omp simd
      for ( int dst = 0; dst < n_dst; dst++ ) {
        w_row[dst] -= ( alpha * dw_row[dst] );
#ifdef COUNT
        num_b_flops += 2;
#endif
      }
    }

    /* Bias update is a single vector op; vectorize only. */
#pragma omp simd
    for ( int dst = 0; dst < n_dst; dst++ ) {
      layer_b[dst] -= ( alpha * layer_db[dst] );
#ifdef COUNT
      num_b_flops += 2;
#endif
    }
  }

BYE:
  return status;
}
zlange.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"

/***************************************************************************//**
 *
 * @ingroup plasma_lange
 *
 *  Returns the norm of a general matrix as
 *
 *     zlange = ( max(abs(A(i,j))), NORM = PlasmaMaxNorm
 *              (
 *              ( norm1(A),         NORM = PlasmaOneNorm
 *              (
 *              ( normI(A),         NORM = PlasmaInfNorm
 *              (
 *              ( normF(A),         NORM = PlasmaFrobeniusNorm
 *
 *  where norm1 denotes the one norm of a matrix (maximum column sum),
 *  normI denotes the infinity norm of a matrix (maximum row sum) and
 *  normF denotes the Frobenius norm of a matrix (square root of sum
 *  of squares). Note that max(abs(A(i,j))) is not a consistent matrix
 *  norm.
 *
 *******************************************************************************
 *
 * @param[in] norm
 *          - PlasmaMaxNorm: max norm
 *          - PlasmaOneNorm: one norm
 *          - PlasmaInfNorm: infinity norm
 *          - PlasmaFrobeniusNorm: Frobenius norm
 *
 * @param[in] m
 *          The number of rows of the matrix A. m >= 0. When m = 0,
 *          the returned value is set to zero.
 *
 * @param[in] n
 *          The number of columns of the matrix A. n >= 0. When n = 0,
 *          the returned value is set to zero.
 *
 * @param[in] pA
 *          The m-by-n matrix A.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,m).
 *
 *******************************************************************************
 *
 * @retval double
 *         The specified norm of the general matrix A.
 *
 *******************************************************************************
 *
 * @sa plasma_omp_zlange
 * @sa plasma_clange
 * @sa plasma_dlange
 * @sa plasma_slange
 *
 ******************************************************************************/
double plasma_zlange(plasma_enum_t norm, int m, int n,
                     plasma_complex64_t *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm) ) {
        plasma_error("illegal value of norm");
        return -1;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (lda < imax(1, m)) {
        // Fix: removed a stray debug printf of lda that leaked onto stdout.
        plasma_error("illegal value of lda");
        return -5;
    }

    // quick return
    if (imin(n, m) == 0)
        return 0.0;

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrix descriptor.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Allocate workspace; size depends on the reduction pattern of each norm.
    double *work = NULL;
    switch (norm) {
    case PlasmaMaxNorm:
        work = (double*)malloc((size_t)A.mt*A.nt*sizeof(double));
        break;
    case PlasmaOneNorm:
        work = (double*)malloc(((size_t)A.mt*A.n+A.n)*sizeof(double));
        break;
    case PlasmaInfNorm:
        work = (double*)malloc(((size_t)A.nt*A.m+A.m)*sizeof(double));
        break;
    case PlasmaFrobeniusNorm:
        work = (double*)malloc((size_t)2*A.mt*A.nt*sizeof(double));
        break;
    }
    if (work == NULL) {
        plasma_error("malloc() failed");
        // Fix: release the tile descriptor created above (was leaked).
        plasma_desc_destroy(&A);
        return PlasmaErrorOutOfMemory;
    }

    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // Fix: release workspace and descriptor (both were leaked).
        free(work);
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;

    double value;
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);

        // Call tile async function.
        plasma_omp_zlange(norm, A, work, &value, sequence, &request);
    }
    // implicit synchronization

    free(work);

    // Free matrix in tile layout.
    plasma_desc_destroy(&A);

    // Destroy sequence.
    plasma_sequence_destroy(sequence);

    // Return the norm.
    return value;
}

/***************************************************************************//**
 *
 * @ingroup plasma_lange
 *
 *  Calculates the max, one, infinity or Frobenius norm of a general matrix.
 *  Non-blocking equivalent of plasma_zlange(). May return before the
 *  computation is finished. Operates on matrices stored by tiles. All matrices
 *  are passed through descriptors. All dimensions are taken from the
 *  descriptors. Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] norm
 *          - PlasmaMaxNorm: Max norm
 *          - PlasmaOneNorm: One norm
 *          - PlasmaInfNorm: Infinity norm
 *          - PlasmaFrobeniusNorm: Frobenius norm
 *
 * @param[in] A
 *          The descriptor of matrix A.
 *
 * @param[out] work
 *          Workspace of size:
 *          - PlasmaMaxNorm: A.mt*A.nt
 *          - PlasmaOneNorm: A.mt*A.n + A.n
 *          - PlasmaInfNorm: A.nt*A.m + A.m
 *          - PlasmaFrobeniusNorm: 2*A.mt*A.nt
 *
 * @param[out] value
 *          The calculated value of the norm requested.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_zlange
 * @sa plasma_omp_clange
 * @sa plasma_omp_dlange
 * @sa plasma_omp_slange
 *
 ******************************************************************************/
void plasma_omp_zlange(plasma_enum_t norm, plasma_desc_t A,
                       double *work, double *value,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        // NOTE(review): reports PlasmaErrorIllegalValue rather than
        // PlasmaErrorNotInitialized -- kept as-is for callers that test it.
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm)) {
        plasma_error("illegal value of norm");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid descriptor A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(A.m, A.n) == 0) {
        *value = 0.0;
        return;
    }

    // Call the parallel function.
    plasma_pzlange(norm, A, work, value, sequence, request);
}
StmtOpenMP.h
//===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// \file /// This file defines OpenMP AST classes for executable directives and /// clauses. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMTOPENMP_H #define LLVM_CLANG_AST_STMTOPENMP_H #include "clang/AST/Expr.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" namespace clang { //===----------------------------------------------------------------------===// // AST classes for directives. //===----------------------------------------------------------------------===// /// This is a basic class for representing single OpenMP executable /// directive. /// class OMPExecutableDirective : public Stmt { friend class ASTStmtReader; /// Kind of the directive. OpenMPDirectiveKind Kind; /// Starting location of the directive (directive keyword). SourceLocation StartLoc; /// Ending location of the directive. SourceLocation EndLoc; /// Numbers of clauses. const unsigned NumClauses; /// Number of child expressions/stmts. const unsigned NumChildren; /// Offset from this to the start of clauses. /// There are NumClauses pointers to clauses, they are followed by /// NumChildren pointers to child stmts/exprs (if the directive type /// requires an associated stmt, then it has to be the first of them). const unsigned ClausesOffset; /// Get the clauses storage. 
MutableArrayRef<OMPClause *> getClauses() { OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>( reinterpret_cast<char *>(this) + ClausesOffset); return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses); } protected: /// Build instance of directive of class \a K. /// /// \param SC Statement class. /// \param K Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// template <typename T> OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses, unsigned NumChildren) : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)), EndLoc(std::move(EndLoc)), NumClauses(NumClauses), NumChildren(NumChildren), ClausesOffset(llvm::alignTo(sizeof(T), alignof(OMPClause *))) {} /// Sets the list of variables for this clause. /// /// \param Clauses The list of clauses for the directive. /// void setClauses(ArrayRef<OMPClause *> Clauses); /// Set the associated statement for the directive. /// /// /param S Associated statement. /// void setAssociatedStmt(Stmt *S) { assert(hasAssociatedStmt() && "no associated statement."); *child_begin() = S; } public: /// Iterates over expressions/statements used in the construct. 
class used_clauses_child_iterator : public llvm::iterator_adaptor_base< used_clauses_child_iterator, ArrayRef<OMPClause *>::iterator, std::forward_iterator_tag, Stmt *, ptrdiff_t, Stmt *, Stmt *> { ArrayRef<OMPClause *>::iterator End; OMPClause::child_iterator ChildI, ChildEnd; void MoveToNext() { if (ChildI != ChildEnd) return; while (this->I != End) { ++this->I; if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); if (ChildI != ChildEnd) return; } } } public: explicit used_clauses_child_iterator(ArrayRef<OMPClause *> Clauses) : used_clauses_child_iterator::iterator_adaptor_base(Clauses.begin()), End(Clauses.end()) { if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); MoveToNext(); } } Stmt *operator*() const { return *ChildI; } Stmt *operator->() const { return **this; } used_clauses_child_iterator &operator++() { ++ChildI; if (ChildI != ChildEnd) return *this; if (this->I != End) { ++this->I; if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); } } MoveToNext(); return *this; } }; static llvm::iterator_range<used_clauses_child_iterator> used_clauses_children(ArrayRef<OMPClause *> Clauses) { return {used_clauses_child_iterator(Clauses), used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))}; } /// Iterates over a filtered subrange of clauses applied to a /// directive. /// /// This iterator visits only clauses of type SpecificClause. 
template <typename SpecificClause> class specific_clause_iterator : public llvm::iterator_adaptor_base< specific_clause_iterator<SpecificClause>, ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag, const SpecificClause *, ptrdiff_t, const SpecificClause *, const SpecificClause *> { ArrayRef<OMPClause *>::const_iterator End; void SkipToNextClause() { while (this->I != End && !isa<SpecificClause>(*this->I)) ++this->I; } public: explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses) : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()), End(Clauses.end()) { SkipToNextClause(); } const SpecificClause *operator*() const { return cast<SpecificClause>(*this->I); } const SpecificClause *operator->() const { return **this; } specific_clause_iterator &operator++() { ++this->I; SkipToNextClause(); return *this; } }; template <typename SpecificClause> static llvm::iterator_range<specific_clause_iterator<SpecificClause>> getClausesOfKind(ArrayRef<OMPClause *> Clauses) { return {specific_clause_iterator<SpecificClause>(Clauses), specific_clause_iterator<SpecificClause>( llvm::makeArrayRef(Clauses.end(), 0))}; } template <typename SpecificClause> llvm::iterator_range<specific_clause_iterator<SpecificClause>> getClausesOfKind() const { return getClausesOfKind<SpecificClause>(clauses()); } /// Gets a single clause of the specified kind associated with the /// current directive iff there is only one clause of this kind (and assertion /// is fired if there is more than one clause is associated with the /// directive). Returns nullptr if no clause of this kind is associated with /// the directive. 
template <typename SpecificClause> const SpecificClause *getSingleClause() const { auto Clauses = getClausesOfKind<SpecificClause>(); if (Clauses.begin() != Clauses.end()) { assert(std::next(Clauses.begin()) == Clauses.end() && "There are at least 2 clauses of the specified kind"); return *Clauses.begin(); } return nullptr; } /// Returns true if the current directive has one or more clauses of a /// specific kind. template <typename SpecificClause> bool hasClausesOfKind() const { auto Clauses = getClausesOfKind<SpecificClause>(); return Clauses.begin() != Clauses.end(); } /// Returns starting location of directive kind. SourceLocation getBeginLoc() const { return StartLoc; } /// Returns ending location of directive. SourceLocation getEndLoc() const { return EndLoc; } /// Set starting location of directive kind. /// /// \param Loc New starting location of directive. /// void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// Set ending location of directive. /// /// \param Loc New ending location of directive. /// void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// Get number of clauses. unsigned getNumClauses() const { return NumClauses; } /// Returns specified clause. /// /// \param i Number of clause. /// OMPClause *getClause(unsigned i) const { return clauses()[i]; } /// Returns true if directive has associated statement. bool hasAssociatedStmt() const { return NumChildren > 0; } /// Returns statement associated with the directive. const Stmt *getAssociatedStmt() const { assert(hasAssociatedStmt() && "no associated statement."); return *child_begin(); } Stmt *getAssociatedStmt() { assert(hasAssociatedStmt() && "no associated statement."); return *child_begin(); } /// Returns the captured statement associated with the /// component region within the (combined) directive. // // \param RegionKind Component region kind. 
const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const { SmallVector<OpenMPDirectiveKind, 4> CaptureRegions; getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind()); assert(std::any_of( CaptureRegions.begin(), CaptureRegions.end(), [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) && "RegionKind not found in OpenMP CaptureRegions."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (auto ThisCaptureRegion : CaptureRegions) { if (ThisCaptureRegion == RegionKind) return CS; CS = cast<CapturedStmt>(CS->getCapturedStmt()); } llvm_unreachable("Incorrect RegionKind specified for directive."); } /// Get innermost captured statement for the construct. CapturedStmt *getInnermostCapturedStmt() { assert(hasAssociatedStmt() && getAssociatedStmt() && "Must have associated statement."); SmallVector<OpenMPDirectiveKind, 4> CaptureRegions; getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind()); assert(!CaptureRegions.empty() && "At least one captured statement must be provided."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (unsigned Level = CaptureRegions.size(); Level > 1; --Level) CS = cast<CapturedStmt>(CS->getCapturedStmt()); return CS; } const CapturedStmt *getInnermostCapturedStmt() const { return const_cast<OMPExecutableDirective *>(this) ->getInnermostCapturedStmt(); } OpenMPDirectiveKind getDirectiveKind() const { return Kind; } static bool classof(const Stmt *S) { return S->getStmtClass() >= firstOMPExecutableDirectiveConstant && S->getStmtClass() <= lastOMPExecutableDirectiveConstant; } child_range children() { if (!hasAssociatedStmt()) return child_range(child_iterator(), child_iterator()); Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end()); /// Do not mark all the special expression/statements as children, except /// for the associated statement. 
return child_range(ChildStorage, ChildStorage + 1); } const_child_range children() const { if (!hasAssociatedStmt()) return const_child_range(const_child_iterator(), const_child_iterator()); Stmt **ChildStorage = reinterpret_cast<Stmt **>( const_cast<OMPExecutableDirective *>(this)->getClauses().end()); return const_child_range(ChildStorage, ChildStorage + 1); } ArrayRef<OMPClause *> clauses() { return getClauses(); } ArrayRef<OMPClause *> clauses() const { return const_cast<OMPExecutableDirective *>(this)->getClauses(); } /// Returns whether or not this is a Standalone directive. /// /// Stand-alone directives are executable directives /// that have no associated user code. bool isStandaloneDirective() const; /// Returns the AST node representing OpenMP structured-block of this /// OpenMP executable directive, /// Prerequisite: Executable Directive must not be Standalone directive. const Stmt *getStructuredBlock() const; Stmt *getStructuredBlock() { return const_cast<Stmt *>( const_cast<const OMPExecutableDirective *>(this)->getStructuredBlock()); } }; /// This represents '#pragma omp parallel' directive. /// /// \code /// #pragma omp parallel private(a,b) reduction(+: c,d) /// \endcode /// In this example directive '#pragma omp parallel' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending Location of the directive. /// OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. 
/// /// \param NumClauses Number of clauses. /// explicit OMPParallelDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement associated with the directive. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelDirectiveClass; } }; /// This is a common base class for loop directives ('omp simd', 'omp /// for', 'omp for simd' etc.). It is responsible for the loop code generation. /// class OMPLoopDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Number of collapsed loops as specified by 'collapse' clause. unsigned CollapsedNum; /// Offsets to the stored exprs. /// This enumeration contains offsets to all the pointers to children /// expressions stored in OMPLoopDirective. 
  /// The first 9 children are necessary for all the loop directives,
  /// the next 8 are specific to the worksharing ones, and the next 11 are
  /// used for combined constructs containing two pragmas associated to loops.
  /// After the fixed children, three arrays of length CollapsedNum are
  /// allocated: loop counters, their updates and final values.
  /// PrevLowerBound and PrevUpperBound are used to communicate blocking
  /// information in composite constructs which require loop blocking
  /// DistInc is used to generate the increment expression for the distribute
  /// loop when combined with a further nested loop
  /// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the
  /// for loop when combined with a previous distribute loop in the same pragma
  /// (e.g. 'distribute parallel for')
  ///
  enum {
    AssociatedStmtOffset = 0,
    IterationVariableOffset = 1,
    LastIterationOffset = 2,
    CalcLastIterationOffset = 3,
    PreConditionOffset = 4,
    CondOffset = 5,
    InitOffset = 6,
    IncOffset = 7,
    PreInitsOffset = 8,
    // The '...End' enumerators do not correspond to child expressions - they
    // specify the offset to the end (and start of the following counters/
    // updates/finals/dependent_counters/dependent_inits/finals_conditions
    // arrays).
    DefaultEnd = 9,
    // The following 8 exprs are used by worksharing and distribute loops only.
    // NOTE: IsLastIterVariableOffset deliberately aliases DefaultEnd (9), and
    // WorksharingEnd aliases PrevLowerBoundVariableOffset (17): which range of
    // slots exists depends on the directive kind (see getArraysOffset()).
    IsLastIterVariableOffset = 9,
    LowerBoundVariableOffset = 10,
    UpperBoundVariableOffset = 11,
    StrideVariableOffset = 12,
    EnsureUpperBoundOffset = 13,
    NextLowerBoundOffset = 14,
    NextUpperBoundOffset = 15,
    NumIterationsOffset = 16,
    // Offset to the end for worksharing loop directives.
    WorksharingEnd = 17,
    PrevLowerBoundVariableOffset = 17,
    PrevUpperBoundVariableOffset = 18,
    DistIncOffset = 19,
    PrevEnsureUpperBoundOffset = 20,
    CombinedLowerBoundVariableOffset = 21,
    CombinedUpperBoundVariableOffset = 22,
    CombinedEnsureUpperBoundOffset = 23,
    CombinedInitOffset = 24,
    CombinedConditionOffset = 25,
    CombinedNextLowerBoundOffset = 26,
    CombinedNextUpperBoundOffset = 27,
    CombinedDistConditionOffset = 28,
    CombinedParForInDistConditionOffset = 29,
    // Offset to the end (and start of the following
    // counters/updates/finals/dependent_counters/dependent_inits/finals_conditions
    // arrays) for combined distribute loop directives.
    CombinedDistributeEnd = 30,
  };

  /// Get the counters storage.
  MutableArrayRef<Expr *> getCounters() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &(*(std::next(child_begin(), getArraysOffset(getDirectiveKind())))));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// Get the private counters storage.
  MutableArrayRef<Expr *> getPrivateCounters() {
    Expr **Storage = reinterpret_cast<Expr **>(&*std::next(
        child_begin(), getArraysOffset(getDirectiveKind()) + CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// Get the counter inits storage (array index 2 past the fixed children).
  MutableArrayRef<Expr *> getInits() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// Get the updates storage.
  MutableArrayRef<Expr *> getUpdates() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// Get the final counter updates storage.
  MutableArrayRef<Expr *> getFinals() {
    Expr **Storage = reinterpret_cast<Expr **>(
        &*std::next(child_begin(),
                    getArraysOffset(getDirectiveKind()) + 4 * CollapsedNum));
    return MutableArrayRef<Expr *>(Storage, CollapsedNum);
  }

  /// Get the dependent counters storage.
MutableArrayRef<Expr *> getDependentCounters() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 5 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the dependent inits storage. MutableArrayRef<Expr *> getDependentInits() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 6 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the finals conditions storage. MutableArrayRef<Expr *> getFinalsConditions() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 7 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } protected: /// Build instance of loop directive of class \a Kind. /// /// \param SC Statement class. /// \param Kind Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed loops from 'collapse' clause. /// \param NumClauses Number of clauses. /// \param NumSpecialChildren Number of additional directive-specific stmts. /// template <typename T> OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses, unsigned NumSpecialChildren = 0) : OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses, numLoopChildren(CollapsedNum, Kind) + NumSpecialChildren), CollapsedNum(CollapsedNum) {} /// Offset to the start of children expression arrays. static unsigned getArraysOffset(OpenMPDirectiveKind Kind) { if (isOpenMPLoopBoundSharingDirective(Kind)) return CombinedDistributeEnd; if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) || isOpenMPDistributeDirective(Kind)) return WorksharingEnd; return DefaultEnd; } /// Children number. 
static unsigned numLoopChildren(unsigned CollapsedNum,
                                  OpenMPDirectiveKind Kind) {
    return getArraysOffset(Kind) +
           8 * CollapsedNum; // Counters, PrivateCounters, Inits,
                             // Updates, Finals, DependentCounters,
                             // DependentInits, FinalsConditions.
  }

  // Setters for the fixed children present on every loop directive.
  void setIterationVariable(Expr *IV) {
    *std::next(child_begin(), IterationVariableOffset) = IV;
  }
  void setLastIteration(Expr *LI) {
    *std::next(child_begin(), LastIterationOffset) = LI;
  }
  void setCalcLastIteration(Expr *CLI) {
    *std::next(child_begin(), CalcLastIterationOffset) = CLI;
  }
  void setPreCond(Expr *PC) {
    *std::next(child_begin(), PreConditionOffset) = PC;
  }
  void setCond(Expr *Cond) { *std::next(child_begin(), CondOffset) = Cond; }
  void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; }
  void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; }
  void setPreInits(Stmt *PreInits) {
    *std::next(child_begin(), PreInitsOffset) = PreInits;
  }
  // Setters below are valid only for worksharing/taskloop/distribute loop
  // directives; each asserts the directive kind before writing its slot.
  void setIsLastIterVariable(Expr *IL) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), IsLastIterVariableOffset) = IL;
  }
  void setLowerBoundVariable(Expr *LB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), LowerBoundVariableOffset) = LB;
  }
  void setUpperBoundVariable(Expr *UB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), UpperBoundVariableOffset) = UB;
  }
  void setStrideVariable(Expr *ST) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), StrideVariableOffset) = ST;
  }
  void setEnsureUpperBound(Expr *EUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), EnsureUpperBoundOffset) = EUB;
  }
  void setNextLowerBound(Expr *NLB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NextLowerBoundOffset) = NLB;
  }
  void setNextUpperBound(Expr *NUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NextUpperBoundOffset) = NUB;
  }
  void setNumIterations(Expr *NI) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    *std::next(child_begin(), NumIterationsOffset) = NI;
  }
  // Setters below are valid only for combined (loop-bound-sharing)
  // directives such as 'distribute parallel for'.
  void setPrevLowerBoundVariable(Expr *PrevLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), PrevLowerBoundVariableOffset) = PrevLB;
  }
  void setPrevUpperBoundVariable(Expr *PrevUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), PrevUpperBoundVariableOffset) = PrevUB;
  }
  void setDistInc(Expr *DistInc) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), DistIncOffset) = DistInc;
  }
  void setPrevEnsureUpperBound(Expr *PrevEUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), PrevEnsureUpperBoundOffset) = PrevEUB;
  }
  void setCombinedLowerBoundVariable(Expr *CombLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedLowerBoundVariableOffset) = CombLB;
  }
  void setCombinedUpperBoundVariable(Expr *CombUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedUpperBoundVariableOffset) = CombUB;
  }
  void setCombinedEnsureUpperBound(Expr *CombEUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedEnsureUpperBoundOffset) = CombEUB;
  }
  void setCombinedInit(Expr *CombInit) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedInitOffset) = CombInit;
  }
  void setCombinedCond(Expr *CombCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedConditionOffset) = CombCond;
  }
  void setCombinedNextLowerBound(Expr *CombNLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedNextLowerBoundOffset) = CombNLB;
  }
  void setCombinedNextUpperBound(Expr *CombNUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    *std::next(child_begin(), CombinedNextUpperBoundOffset) = CombNUB;
  }
  void setCombinedDistCond(Expr *CombDistCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    *std::next(child_begin(), CombinedDistConditionOffset) = CombDistCond;
  }
  void setCombinedParForInDistCond(Expr *CombParForInDistCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    *std::next(child_begin(), CombinedParForInDistConditionOffset) =
        CombParForInDistCond;
  }
  // Bulk setters for the trailing per-counter arrays; defined out of line.
  void setCounters(ArrayRef<Expr *> A);
  void setPrivateCounters(ArrayRef<Expr *> A);
  void setInits(ArrayRef<Expr *> A);
  void setUpdates(ArrayRef<Expr *> A);
  void setFinals(ArrayRef<Expr *> A);
  void setDependentCounters(ArrayRef<Expr *> A);
  void setDependentInits(ArrayRef<Expr *> A);
  void setFinalsConditions(ArrayRef<Expr *> A);

public:
  /// The expressions built to support OpenMP loops in combined/composite
  /// pragmas (e.g. pragma omp distribute parallel for)
  struct DistCombinedHelperExprs {
    /// DistributeLowerBound - used when composing 'omp distribute' with
    /// 'omp for' in a same construct.
    Expr *LB;
    /// DistributeUpperBound - used when composing 'omp distribute' with
    /// 'omp for' in a same construct.
    Expr *UB;
    /// DistributeEnsureUpperBound - used when composing 'omp distribute'
    /// with 'omp for' in a same construct, EUB depends on DistUB
    Expr *EUB;
    /// Distribute loop iteration variable init used when composing 'omp
    /// distribute'
    /// with 'omp for' in a same construct
    Expr *Init;
    /// Distribute Loop condition used when composing 'omp distribute'
    /// with 'omp for' in a same construct
    Expr *Cond;
    /// Update of LowerBound for statically scheduled omp loops for
    /// outer loop in combined constructs (e.g. 'distribute parallel for')
    Expr *NLB;
    /// Update of UpperBound for statically scheduled omp loops for
    /// outer loop in combined constructs (e.g. 'distribute parallel for')
    Expr *NUB;
    /// Distribute Loop condition used when composing 'omp distribute'
    /// with 'omp for' in a same construct when schedule is chunked.
    Expr *DistCond;
    /// 'omp parallel for' loop condition used when composed with
    /// 'omp distribute' in the same construct and when schedule is
    /// chunked and the chunk size is 1.
Expr *ParForInDistCond;
  };

  /// The expressions built for the OpenMP loop CodeGen for the
  /// whole collapsed loop nest.
  struct HelperExprs {
    /// Loop iteration variable.
    Expr *IterationVarRef;
    /// Loop last iteration number.
    Expr *LastIteration;
    /// Loop number of iterations.
    Expr *NumIterations;
    /// Calculation of last iteration.
    Expr *CalcLastIteration;
    /// Loop pre-condition.
    Expr *PreCond;
    /// Loop condition.
    Expr *Cond;
    /// Loop iteration variable init.
    Expr *Init;
    /// Loop increment.
    Expr *Inc;
    /// IsLastIteration - local flag variable passed to runtime.
    Expr *IL;
    /// LowerBound - local variable passed to runtime.
    Expr *LB;
    /// UpperBound - local variable passed to runtime.
    Expr *UB;
    /// Stride - local variable passed to runtime.
    Expr *ST;
    /// EnsureUpperBound -- expression UB = min(UB, NumIterations).
    Expr *EUB;
    /// Update of LowerBound for statically scheduled 'omp for' loops.
    Expr *NLB;
    /// Update of UpperBound for statically scheduled 'omp for' loops.
    Expr *NUB;
    /// PreviousLowerBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevLB;
    /// PreviousUpperBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevUB;
    /// DistInc - increment expression for distribute loop when found
    /// combined with a further loop level (e.g. in 'distribute parallel for')
    /// expression IV = IV + ST
    Expr *DistInc;
    /// PrevEUB - expression similar to EUB but to be used when loop
    /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for'
    /// when ensuring that the UB is either the calculated UB by the runtime or
    /// the end of the assigned distribute chunk)
    /// expression UB = min (UB, PrevUB)
    Expr *PrevEUB;
    /// Counters Loop counters.
    SmallVector<Expr *, 4> Counters;
    /// PrivateCounters Loop counters.
    SmallVector<Expr *, 4> PrivateCounters;
    /// Expressions for loop counters inits for CodeGen.
    SmallVector<Expr *, 4> Inits;
    /// Expressions for loop counters update for CodeGen.
    SmallVector<Expr *, 4> Updates;
    /// Final loop counter values for GodeGen.
    SmallVector<Expr *, 4> Finals;
    /// List of counters required for the generation of the non-rectangular
    /// loops.
    SmallVector<Expr *, 4> DependentCounters;
    /// List of initializers required for the generation of the non-rectangular
    /// loops.
    SmallVector<Expr *, 4> DependentInits;
    /// List of final conditions required for the generation of the
    /// non-rectangular loops.
    SmallVector<Expr *, 4> FinalsConditions;
    /// Init statement for all captured expressions.
    Stmt *PreInits;

    /// Expressions used when combining OpenMP loop pragmas
    DistCombinedHelperExprs DistCombinedFields;

    /// Check if all the expressions are built (does not check the
    /// worksharing ones).
    bool builtAll() {
      return IterationVarRef != nullptr && LastIteration != nullptr &&
             NumIterations != nullptr && PreCond != nullptr &&
             Cond != nullptr && Init != nullptr && Inc != nullptr;
    }

    /// Initialize all the fields to null.
    /// \param Size Number of elements in the
    /// counters/finals/updates/dependent_counters/dependent_inits/finals_conditions
    /// arrays.
    void clear(unsigned Size) {
      IterationVarRef = nullptr;
      LastIteration = nullptr;
      CalcLastIteration = nullptr;
      PreCond = nullptr;
      Cond = nullptr;
      Init = nullptr;
      Inc = nullptr;
      IL = nullptr;
      LB = nullptr;
      UB = nullptr;
      ST = nullptr;
      EUB = nullptr;
      NLB = nullptr;
      NUB = nullptr;
      NumIterations = nullptr;
      PrevLB = nullptr;
      PrevUB = nullptr;
      DistInc = nullptr;
      PrevEUB = nullptr;
      // Resize the per-counter arrays first, then null out every slot.
      Counters.resize(Size);
      PrivateCounters.resize(Size);
      Inits.resize(Size);
      Updates.resize(Size);
      Finals.resize(Size);
      DependentCounters.resize(Size);
      DependentInits.resize(Size);
      FinalsConditions.resize(Size);
      for (unsigned i = 0; i < Size; ++i) {
        Counters[i] = nullptr;
        PrivateCounters[i] = nullptr;
        Inits[i] = nullptr;
        Updates[i] = nullptr;
        Finals[i] = nullptr;
        DependentCounters[i] = nullptr;
        DependentInits[i] = nullptr;
        FinalsConditions[i] = nullptr;
      }
      PreInits = nullptr;
      DistCombinedFields.LB = nullptr;
      DistCombinedFields.UB = nullptr;
      DistCombinedFields.EUB = nullptr;
      DistCombinedFields.Init = nullptr;
      DistCombinedFields.Cond = nullptr;
      DistCombinedFields.NLB = nullptr;
      DistCombinedFields.NUB = nullptr;
      DistCombinedFields.DistCond = nullptr;
      DistCombinedFields.ParForInDistCond = nullptr;
    }
  };

  /// Get number of collapsed loops.
unsigned getCollapsedNumber() const { return CollapsedNum; }

  Expr *getIterationVariable() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), IterationVariableOffset)));
  }
  Expr *getLastIteration() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), LastIterationOffset)));
  }
  Expr *getCalcLastIteration() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CalcLastIterationOffset)));
  }
  Expr *getPreCond() const {
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PreConditionOffset)));
  }
  Expr *getCond() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), CondOffset)));
  }
  Expr *getInit() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset)));
  }
  Expr *getInc() const {
    return const_cast<Expr *>(
        reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset)));
  }
  const Stmt *getPreInits() const {
    return *std::next(child_begin(), PreInitsOffset);
  }
  Stmt *getPreInits() { return *std::next(child_begin(), PreInitsOffset); }
  // Getters below mirror the corresponding setters: each asserts the
  // directive kind that guarantees the child slot actually exists.
  Expr *getIsLastIterVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), IsLastIterVariableOffset)));
  }
  Expr *getLowerBoundVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), LowerBoundVariableOffset)));
  }
  Expr *getUpperBoundVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), UpperBoundVariableOffset)));
  }
  Expr *getStrideVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), StrideVariableOffset)));
  }
  Expr *getEnsureUpperBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), EnsureUpperBoundOffset)));
  }
  Expr *getNextLowerBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NextLowerBoundOffset)));
  }
  Expr *getNextUpperBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NextUpperBoundOffset)));
  }
  Expr *getNumIterations() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), NumIterationsOffset)));
  }
  Expr *getPrevLowerBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PrevLowerBoundVariableOffset)));
  }
  Expr *getPrevUpperBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PrevUpperBoundVariableOffset)));
  }
  Expr *getDistInc() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), DistIncOffset)));
  }
  Expr *getPrevEnsureUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), PrevEnsureUpperBoundOffset)));
  }
  Expr *getCombinedLowerBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedLowerBoundVariableOffset)));
  }
  Expr *getCombinedUpperBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedUpperBoundVariableOffset)));
  }
  Expr *getCombinedEnsureUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedEnsureUpperBoundOffset)));
  }
  Expr *getCombinedInit() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedInitOffset)));
  }
  Expr *getCombinedCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedConditionOffset)));
  }
  Expr *getCombinedNextLowerBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedNextLowerBoundOffset)));
  }
  Expr *getCombinedNextUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedNextUpperBoundOffset)));
  }
  Expr *getCombinedDistCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedDistConditionOffset)));
  }
  Expr *getCombinedParForInDistCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    return const_cast<Expr *>(reinterpret_cast<const Expr *>(
        *std::next(child_begin(), CombinedParForInDistConditionOffset)));
  }
  const Stmt *getBody() const {
    // This relies on the loop form is already checked by Sema.
const Stmt *Body =
        getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
    // Peel one loop level per collapsed loop: Sema guarantees each level is
    // either a canonical ForStmt or a CXXForRangeStmt.
    if (auto *For = dyn_cast<ForStmt>(Body)) {
      Body = For->getBody();
    } else {
      assert(isa<CXXForRangeStmt>(Body) &&
             "Expected canonical for loop or range-based for loop.");
      Body = cast<CXXForRangeStmt>(Body)->getBody();
    }
    for (unsigned Cnt = 1; Cnt < CollapsedNum; ++Cnt) {
      Body = Body->IgnoreContainers();
      if (auto *For = dyn_cast<ForStmt>(Body)) {
        Body = For->getBody();
      } else {
        assert(isa<CXXForRangeStmt>(Body) &&
               "Expected canonical for loop or range-based for loop.");
        Body = cast<CXXForRangeStmt>(Body)->getBody();
      }
    }
    return Body;
  }

  ArrayRef<Expr *> counters() { return getCounters(); }

  ArrayRef<Expr *> counters() const {
    return const_cast<OMPLoopDirective *>(this)->getCounters();
  }

  ArrayRef<Expr *> private_counters() { return getPrivateCounters(); }

  ArrayRef<Expr *> private_counters() const {
    return const_cast<OMPLoopDirective *>(this)->getPrivateCounters();
  }

  ArrayRef<Expr *> inits() { return getInits(); }

  ArrayRef<Expr *> inits() const {
    return const_cast<OMPLoopDirective *>(this)->getInits();
  }

  ArrayRef<Expr *> updates() { return getUpdates(); }

  ArrayRef<Expr *> updates() const {
    return const_cast<OMPLoopDirective *>(this)->getUpdates();
  }

  ArrayRef<Expr *> finals() { return getFinals(); }

  ArrayRef<Expr *> finals() const {
    return const_cast<OMPLoopDirective *>(this)->getFinals();
  }

  ArrayRef<Expr *> dependent_counters() { return getDependentCounters(); }

  ArrayRef<Expr *> dependent_counters() const {
    return const_cast<OMPLoopDirective *>(this)->getDependentCounters();
  }

  ArrayRef<Expr *> dependent_inits() { return getDependentInits(); }

  ArrayRef<Expr *> dependent_inits() const {
    return const_cast<OMPLoopDirective *>(this)->getDependentInits();
  }

  ArrayRef<Expr *> finals_conditions() { return getFinalsConditions(); }

  ArrayRef<Expr *> finals_conditions() const {
    return const_cast<OMPLoopDirective *>(this)->getFinalsConditions();
  }

  // True for every concrete directive class that derives from
  // OMPLoopDirective; keep this list in sync with new loop directives.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass ||
           T->getStmtClass() == OMPForDirectiveClass ||
           T->getStmtClass() == OMPForSimdDirectiveClass ||
           T->getStmtClass() == OMPParallelForDirectiveClass ||
           T->getStmtClass() == OMPParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTaskLoopDirectiveClass ||
           T->getStmtClass() == OMPTaskLoopSimdDirectiveClass ||
           T->getStmtClass() == OMPMasterTaskLoopDirectiveClass ||
           T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass ||
           T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass ||
           T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass ||
           T->getStmtClass() == OMPDistributeDirectiveClass ||
           T->getStmtClass() == OMPTargetParallelForDirectiveClass ||
           T->getStmtClass() == OMPDistributeParallelForDirectiveClass ||
           T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPDistributeSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetSimdDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass ||
           T->getStmtClass() ==
               OMPTeamsDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass ||
           T->getStmtClass() ==
               OMPTargetTeamsDistributeParallelForDirectiveClass ||
           T->getStmtClass() ==
               OMPTargetTeamsDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass ||
           T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
  }
};

/// This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
/// class OMPSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass; } }; /// This represents '#pragma omp for' directive. 
/// /// \code /// #pragma omp for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for' has clauses 'private' with the /// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c' /// and 'd'. /// class OMPForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. 
/// static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForDirectiveClass; } }; /// This represents '#pragma omp for simd' directive. /// /// \code /// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForSimdDirectiveClass; } }; /// This represents '#pragma omp sections' directive. /// /// \code /// #pragma omp sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp sections' has clauses 'private' with /// the variables 'a' and 'b' and 'reduction' with operator '+' and variables /// 'c' and 'd'. /// class OMPSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. 
/// OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPSectionsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner directive. /// static OMPSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPSectionsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionsDirectiveClass; } }; /// This represents '#pragma omp section' directive. /// /// \code /// #pragma omp section /// \endcode /// class OMPSectionDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section, StartLoc, EndLoc, 0, 1), HasCancel(false) {} /// Build an empty directive. /// explicit OMPSectionDirective() : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section, SourceLocation(), SourceLocation(), 0, 1), HasCancel(false) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner directive. /// static OMPSectionDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive. /// /// \param C AST context. /// static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionDirectiveClass; } }; /// This represents '#pragma omp single' directive. /// /// \code /// #pragma omp single private(a,b) copyprivate(c,d) /// \endcode /// In this example directive '#pragma omp single' has clauses 'private' with /// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'. /// class OMPSingleDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. 
/// OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPSingleDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPSingleDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPSingleDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSingleDirectiveClass; } }; /// This represents '#pragma omp master' directive. /// /// \code /// #pragma omp master /// \endcode /// class OMPMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master, StartLoc, EndLoc, 0, 1) {} /// Build an empty directive. /// explicit OMPMasterDirective() : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master, SourceLocation(), SourceLocation(), 0, 1) {} public: /// Creates directive. 
/// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPMasterDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterDirectiveClass; } }; /// This represents '#pragma omp critical' directive. /// /// \code /// #pragma omp critical /// \endcode /// class OMPCriticalDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Name of the directive. DeclarationNameInfo DirName; /// Build directive with the given start and end location. /// /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical, StartLoc, EndLoc, NumClauses, 1), DirName(Name) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPCriticalDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical, SourceLocation(), SourceLocation(), NumClauses, 1), DirName() {} /// Set name of the directive. /// /// \param Name Name of the directive. /// void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; } public: /// Creates directive. /// /// \param C AST context. /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. 
/// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPCriticalDirective * Create(const ASTContext &C, const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCriticalDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return name of the directive. /// DeclarationNameInfo getDirectiveName() const { return DirName; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCriticalDirectiveClass; } }; /// This represents '#pragma omp parallel for' directive. /// /// \code /// #pragma omp parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current region has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForDirectiveClass; } }; /// This represents '#pragma omp parallel for simd' directive. /// /// \code /// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for simd' has clauses /// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j' /// and linear step 's', 'reduction' with operator '+' and variables 'c' and /// 'd'. 
/// class OMPParallelForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, OMPD_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp parallel sections' directive. /// /// \code /// #pragma omp parallel sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel sections' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPParallelSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, OMPD_parallel_sections, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPParallelSectionsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, OMPD_parallel_sections, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner cancel directive. 
/// static OMPParallelSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelSectionsDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelSectionsDirectiveClass; } }; /// This represents '#pragma omp task' directive. /// /// \code /// #pragma omp task private(a,b) final(d) /// \endcode /// In this example directive '#pragma omp task' has clauses 'private' with the /// variables 'a' and 'b' and 'final' with condition 'd'. /// class OMPTaskDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if this directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTaskDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true, if current directive has inner cancel directive. /// static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskDirectiveClass; } }; /// This represents '#pragma omp taskyield' directive. /// /// \code /// #pragma omp taskyield /// \endcode /// class OMPTaskyieldDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPTaskyieldDirective() : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskyieldDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. 
/// static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskyieldDirectiveClass; } }; /// This represents '#pragma omp barrier' directive. /// /// \code /// #pragma omp barrier /// \endcode /// class OMPBarrierDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPBarrierDirective() : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPBarrierDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPBarrierDirectiveClass; } }; /// This represents '#pragma omp taskwait' directive. /// /// \code /// #pragma omp taskwait /// \endcode /// class OMPTaskwaitDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. 
/// explicit OMPTaskwaitDirective() : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskwaitDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskwaitDirectiveClass; } }; /// This represents '#pragma omp taskgroup' directive. /// /// \code /// #pragma omp taskgroup /// \endcode /// class OMPTaskgroupDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup, StartLoc, EndLoc, NumClauses, 2) {} /// Build an empty directive. /// \param NumClauses Number of clauses. /// explicit OMPTaskgroupDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup, SourceLocation(), SourceLocation(), NumClauses, 2) {} /// Sets the task_reduction return variable. void setReductionRef(Expr *RR) { *std::next(child_begin(), 1) = RR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. 
/// \param ReductionRef Reference to the task_reduction return variable. /// static OMPTaskgroupDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *ReductionRef); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns reference to the task_reduction return variable. const Expr *getReductionRef() const { return static_cast<const Expr *>(*std::next(child_begin(), 1)); } Expr *getReductionRef() { return static_cast<Expr *>(*std::next(child_begin(), 1)); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskgroupDirectiveClass; } }; /// This represents '#pragma omp flush' directive. /// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has 2 arguments- variables 'a' /// and 'b'. /// 'omp flush' directive does not have clauses but have an optional list of /// variables to flush. This list of variables is stored within some fake clause /// FlushClause. class OMPFlushDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush, StartLoc, EndLoc, NumClauses, 0) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPFlushDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush, SourceLocation(), SourceLocation(), NumClauses, 0) {} public: /// Creates directive with a list of \a Clauses. 
/// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses (only single OMPFlushClause clause is /// allowed). /// static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPFlushDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPFlushDirectiveClass; } }; /// This represents '#pragma omp ordered' directive. /// /// \code /// #pragma omp ordered /// \endcode /// class OMPOrderedDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPOrderedDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. 
/// static OMPOrderedDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPOrderedDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPOrderedDirectiveClass; } }; /// This represents '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has clause 'capture'. /// class OMPAtomicDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Used for 'atomic update' or 'atomic capture' constructs. They may /// have atomic expressions of forms /// \code /// x = x binop expr; /// x = expr binop x; /// \endcode /// This field is true for the first form of the expression and false for the /// second. Required for correct codegen of non-associative operations (like /// << or >>). bool IsXLHSInRHSPart; /// Used for 'atomic update' or 'atomic capture' constructs. They may /// have atomic expressions of forms /// \code /// v = x; <update x>; /// <update x>; v = x; /// \endcode /// This field is true for the first(postfix) form of the expression and false /// otherwise. bool IsPostfixUpdate; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic, StartLoc, EndLoc, NumClauses, 5), IsXLHSInRHSPart(false), IsPostfixUpdate(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. 
/// explicit OMPAtomicDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic, SourceLocation(), SourceLocation(), NumClauses, 5), IsXLHSInRHSPart(false), IsPostfixUpdate(false) {} /// Set 'x' part of the associated expression/statement. void setX(Expr *X) { *std::next(child_begin()) = X; } /// Set helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; } /// Set 'v' part of the associated expression/statement. void setV(Expr *V) { *std::next(child_begin(), 3) = V; } /// Set 'expr' part of the associated expression/statement. void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; } public: /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr' /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for /// detailed description of 'x', 'v' and 'expr'). /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param X 'x' part of the associated expression/statement. /// \param V 'v' part of the associated expression/statement. /// \param E 'expr' part of the associated expression/statement. /// \param UE Helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the /// second. /// \param IsPostfixUpdate true if original value of 'x' must be stored in /// 'v', not an updated one. 
  static OMPAtomicDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
         Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  // Note: all getters below use cast_or_null, so they return nullptr when the
  // corresponding child slot has not been populated yet (e.g. on a directive
  // built via CreateEmpty before deserialization fills it in).

  /// Get 'x' part of the associated expression/statement.
  Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); }
  const Expr *getX() const {
    return cast_or_null<Expr>(*std::next(child_begin()));
  }

  /// Get helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  Expr *getUpdateExpr() {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  const Expr *getUpdateExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }

  /// Return true if helper update expression has form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }

  /// Return true if 'v' expression must be updated to original value of
  /// 'x', false if 'v' must be updated to the new value of 'x'.
  bool isPostfixUpdate() const { return IsPostfixUpdate; }

  /// Get 'v' part of the associated expression/statement.
  Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); }
  const Expr *getV() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 3));
  }

  /// Get 'expr' part of the associated expression/statement.
  Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); }
  const Expr *getExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 4));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPAtomicDirectiveClass;
  }
};

/// This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
/// static OMPTargetDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDirectiveClass; } }; /// This represents '#pragma omp target data' directive. /// /// \code /// #pragma omp target data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target data' has clauses 'device' /// with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDataDirectiveClass, OMPD_target_data, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetDataDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDataDirectiveClass, OMPD_target_data, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. 
/// static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDataDirectiveClass; } }; /// This represents '#pragma omp target enter data' directive. /// /// \code /// #pragma omp target enter data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target enter data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetEnterDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass, OMPD_target_enter_data, StartLoc, EndLoc, NumClauses, /*NumChildren=*/1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetEnterDataDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass, OMPD_target_enter_data, SourceLocation(), SourceLocation(), NumClauses, /*NumChildren=*/1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetEnterDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. 
/// static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetEnterDataDirectiveClass; } }; /// This represents '#pragma omp target exit data' directive. /// /// \code /// #pragma omp target exit data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target exit data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetExitDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass, OMPD_target_exit_data, StartLoc, EndLoc, NumClauses, /*NumChildren=*/1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetExitDataDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass, OMPD_target_exit_data, SourceLocation(), SourceLocation(), NumClauses, /*NumChildren=*/1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetExitDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. 
/// static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetExitDataDirectiveClass; } }; /// This represents '#pragma omp target parallel' directive. /// /// \code /// #pragma omp target parallel if(a) /// \endcode /// In this example directive '#pragma omp target parallel' has clause 'if' with /// condition 'a'. /// class OMPTargetParallelDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass, OMPD_target_parallel, StartLoc, EndLoc, NumClauses, /*NumChildren=*/1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetParallelDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass, OMPD_target_parallel, SourceLocation(), SourceLocation(), NumClauses, /*NumChildren=*/1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. 
/// static OMPTargetParallelDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelDirectiveClass; } }; /// This represents '#pragma omp target parallel for' directive. /// /// \code /// #pragma omp target parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp target parallel for' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPTargetParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current region has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass, OMPD_target_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass, OMPD_target_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. 
/// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPTargetParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForDirectiveClass; } }; /// This represents '#pragma omp teams' directive. /// /// \code /// #pragma omp teams if(a) /// \endcode /// In this example directive '#pragma omp teams' has clause 'if' with /// condition 'a'. /// class OMPTeamsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. 
/// explicit OMPTeamsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDirectiveClass; } }; /// This represents '#pragma omp cancellation point' directive. /// /// \code /// #pragma omp cancellation point for /// \endcode /// /// In this example a cancellation point is created for innermost 'for' region. class OMPCancellationPointDirective : public OMPExecutableDirective { friend class ASTStmtReader; OpenMPDirectiveKind CancelRegion; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass, OMPD_cancellation_point, StartLoc, EndLoc, 0, 0), CancelRegion(OMPD_unknown) {} /// Build an empty directive. 
/// explicit OMPCancellationPointDirective() : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass, OMPD_cancellation_point, SourceLocation(), SourceLocation(), 0, 0), CancelRegion(OMPD_unknown) {} /// Set cancel region for current cancellation point. /// \param CR Cancellation region. void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPCancellationPointDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Creates an empty directive. /// /// \param C AST context. /// static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Get cancellation region for the current cancellation point. OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancellationPointDirectiveClass; } }; /// This represents '#pragma omp cancel' directive. /// /// \code /// #pragma omp cancel for /// \endcode /// /// In this example a cancel is created for innermost 'for' region. class OMPCancelDirective : public OMPExecutableDirective { friend class ASTStmtReader; OpenMPDirectiveKind CancelRegion; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel, StartLoc, EndLoc, NumClauses, 0), CancelRegion(OMPD_unknown) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. 
explicit OMPCancelDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel, SourceLocation(), SourceLocation(), NumClauses, 0), CancelRegion(OMPD_unknown) {} /// Set cancel region for current cancellation point. /// \param CR Cancellation region. void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// static OMPCancelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCancelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Get cancellation region for the current cancellation point. OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancelDirectiveClass; } }; /// This represents '#pragma omp taskloop' directive. /// /// \code /// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopDirectiveClass; } }; /// This represents '#pragma omp taskloop simd' directive. /// /// \code /// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop simd' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. 
/// class OMPTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass, OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass, OMPD_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp master taskloop' directive. /// /// \code /// #pragma omp master taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp master taskloop' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPMasterTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPMasterTaskLoopDirectiveClass, OMPD_master_taskloop, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPMasterTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPMasterTaskLoopDirectiveClass, OMPD_master_taskloop, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPMasterTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPMasterTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterTaskLoopDirectiveClass; } }; /// This represents '#pragma omp master taskloop simd' directive. /// /// \code /// #pragma omp master taskloop simd private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp master taskloop simd' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPMasterTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPMasterTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPMasterTaskLoopSimdDirectiveClass, OMPD_master_taskloop_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPMasterTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPMasterTaskLoopSimdDirectiveClass, OMPD_master_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \p Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPMasterTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \p NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPMasterTaskLoopSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp parallel master taskloop' directive. /// /// \code /// #pragma omp parallel master taskloop private(a,b) grainsize(val) /// num_tasks(num) /// \endcode /// In this example directive '#pragma omp parallel master taskloop' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPParallelMasterTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelMasterTaskLoopDirectiveClass, OMPD_parallel_master_taskloop, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPParallelMasterTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelMasterTaskLoopDirectiveClass, OMPD_parallel_master_taskloop, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelMasterTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelMasterTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass; } }; /// This represents '#pragma omp parallel master taskloop simd' directive. 
/// /// \code /// #pragma omp parallel master taskloop simd private(a,b) grainsize(val) /// num_tasks(num) /// \endcode /// In this example directive '#pragma omp parallel master taskloop simd' has /// clauses 'private' with the variables 'a' and 'b', 'grainsize' with /// expression 'val' and 'num_tasks' with expression 'num'. /// class OMPParallelMasterTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelMasterTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelMasterTaskLoopSimdDirectiveClass, OMPD_parallel_master_taskloop_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPParallelMasterTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelMasterTaskLoopSimdDirectiveClass, OMPD_parallel_master_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \p Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPParallelMasterTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelMasterTaskLoopSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp distribute' directive. /// /// \code /// #pragma omp distribute private(a,b) /// \endcode /// In this example directive '#pragma omp distribute' has clauses 'private' /// with the variables 'a' and 'b' /// class OMPDistributeDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. 
/// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeDirectiveClass; } }; /// This represents '#pragma omp target update' directive. /// /// \code /// #pragma omp target update to(a) from(b) device(1) /// \endcode /// In this example directive '#pragma omp target update' has clause 'to' with /// argument 'a', clause 'from' with argument 'b' and clause 'device' with /// argument '1'. /// class OMPTargetUpdateDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass, OMPD_target_update, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. 
/// explicit OMPTargetUpdateDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass, OMPD_target_update, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetUpdateDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses The number of clauses. /// static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetUpdateDirectiveClass; } }; /// This represents '#pragma omp distribute parallel for' composite /// directive. /// /// \code /// #pragma omp distribute parallel for private(a,b) /// \endcode /// In this example directive '#pragma omp distribute parallel for' has clause /// 'private' with the variables 'a' and 'b' /// class OMPDistributeParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass, OMPD_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass, OMPD_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. 
bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp distribute parallel for simd' composite /// directive. /// /// \code /// #pragma omp distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp distribute parallel for simd' has /// clause 'private' with the variables 'x' /// class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass, OMPD_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass, OMPD_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPDistributeParallelForSimdDirective *Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeParallelForSimdDirective *CreateEmpty( const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp distribute simd' composite directive. /// /// \code /// #pragma omp distribute simd private(x) /// \endcode /// In this example directive '#pragma omp distribute simd' has clause /// 'private' with the variables 'x' /// class OMPDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass, OMPD_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass, OMPD_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. 
/// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp target parallel for simd' directive. /// /// \code /// #pragma omp target parallel for simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target parallel for simd' has clauses /// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen' /// with the variable 'c'. /// class OMPTargetParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPTargetParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass, OMPD_target_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass, OMPD_target_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target simd' directive. 
/// /// \code /// #pragma omp target simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target simd' has clauses 'private' /// with the variable 'a', 'map' with the variable 'b' and 'safelen' with /// the variable 'c'. /// class OMPTargetSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, OMPD_target_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, OMPD_target_simd, SourceLocation(),SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. 
/// \param NumClauses Number of clauses. /// static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute' directive. /// /// \code /// #pragma omp teams distribute private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute' has clauses /// 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass, OMPD_teams_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass, OMPD_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp teams distribute simd' /// combined directive. /// /// \code /// #pragma omp teams distribute simd private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute simd' /// has clause 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass, OMPD_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass, OMPD_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for simd' composite /// directive. /// /// \code /// #pragma omp teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for simd' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// \param NumClauses Number of clauses. /// OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass, OMPD_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass, OMPD_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for' composite /// directive. 
/// /// \code /// #pragma omp teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass, OMPD_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass, OMPD_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. 
/// static OMPTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams' directive. /// /// \code /// #pragma omp target teams if(a>0) /// \endcode /// In this example directive '#pragma omp target teams' has clause 'if' with /// condition 'a>0'. /// class OMPTargetTeamsDirective final : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass, OMPD_target_teams, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass, OMPD_target_teams, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDirectiveClass; } }; /// This represents '#pragma omp target teams distribute' combined directive. /// /// \code /// #pragma omp target teams distribute private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute' has clause /// 'private' with the variables 'x' /// class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass, OMPD_target_teams_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass, OMPD_target_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for' combined /// directive. /// /// \code /// #pragma omp target teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeParallelForDirectiveClass, OMPD_target_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForDirectiveClass, OMPD_target_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTargetTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPTargetTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for simd' /// combined directive. /// /// \code /// #pragma omp target teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for simd' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass, OMPD_target_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeParallelForSimdDirective( unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass, OMPD_target_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. 
/// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target teams distribute simd' combined /// directive. /// /// \code /// #pragma omp target teams distribute simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute simd' /// has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, OMPD_target_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, OMPD_target_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; } // end namespace clang #endif
/* ======================== rwpng.c ======================== */
/*--------------------------------------------------------------------------- pngquant: RGBA -> RGBA-palette quantization program rwpng.c --------------------------------------------------------------------------- © 1998-2000 by Greg Roelofs. © 2009-2014 by Kornel Lesiński. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
---------------------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "png.h" #include "rwpng.h" #if USE_LCMS #include "lcms2.h" #endif #ifndef Z_BEST_COMPRESSION #define Z_BEST_COMPRESSION 9 #endif #ifndef Z_BEST_SPEED #define Z_BEST_SPEED 1 #endif #ifdef _OPENMP #include <omp.h> #else #define omp_get_max_threads() 1 #endif static void rwpng_error_handler(png_structp png_ptr, png_const_charp msg); static void rwpng_warning_stderr_handler(png_structp png_ptr, png_const_charp msg); static void rwpng_warning_silent_handler(png_structp png_ptr, png_const_charp msg); int rwpng_read_image24_cocoa(FILE *infile, png24_image *mainprog_ptr); void rwpng_version_info(FILE *fp) { const char *pngver = png_get_header_ver(NULL); #if USE_COCOA fprintf(fp, " Using libpng %s and Apple Cocoa image reader.\n", pngver); #elif USE_LCMS fprintf(fp, " Using libpng %s with Little CMS color profile support.\n", pngver); #else fprintf(fp, " Using libpng %s and Apple Cocoa image reader.\n", pngver); #endif #if PNG_LIBPNG_VER < 10600 if (strcmp(pngver, "1.3.") < 0) { fputs("\nWARNING: Your version of libpng is outdated and may produce corrupted files.\n" "Please recompile pngquant with a newer version of libpng (1.5 or later).\n", fp); } #endif } struct rwpng_read_data { FILE *const fp; png_size_t bytes_read; }; static void user_read_data(png_structp png_ptr, png_bytep data, png_size_t length) { struct rwpng_read_data *read_data = (struct rwpng_read_data *)png_get_io_ptr(png_ptr); png_size_t read = fread(data, 1, length, read_data->fp); if (!read) { png_error(png_ptr, "Read error"); } read_data->bytes_read += read; } struct rwpng_write_state { FILE *outfile; png_size_t maximum_file_size; png_size_t bytes_written; pngquant_error retval; }; static void user_write_data(png_structp png_ptr, png_bytep data, png_size_t length) { struct rwpng_write_state *write_state = (struct rwpng_write_state *)png_get_io_ptr(png_ptr); if 
(SUCCESS != write_state->retval) { return; } if (write_state->maximum_file_size && write_state->bytes_written + length > write_state->maximum_file_size) { write_state->retval = TOO_LARGE_FILE; } if (!fwrite(data, 1, length, write_state->outfile)) { write_state->retval = CANT_WRITE_ERROR; } write_state->bytes_written += length; } static void user_flush_data(png_structp png_ptr) { // libpng never calls this :( } static png_bytepp rwpng_create_row_pointers(png_infop info_ptr, png_structp png_ptr, unsigned char *base, unsigned int height, unsigned int rowbytes) { if (!rowbytes) { rowbytes = png_get_rowbytes(png_ptr, info_ptr); } png_bytepp row_pointers = malloc(height * sizeof(row_pointers[0])); if (!row_pointers) return NULL; for(unsigned int row = 0; row < height; ++row) { row_pointers[row] = base + row * rowbytes; } return row_pointers; } static int read_chunk_callback(png_structp png_ptr, png_unknown_chunkp in_chunk) { if (0 == memcmp("iCCP", in_chunk->name, 5) || 0 == memcmp("cHRM", in_chunk->name, 5) || 0 == memcmp("gAMA", in_chunk->name, 5)) { return 0; // not handled } struct rwpng_chunk **head = (struct rwpng_chunk **)png_get_user_chunk_ptr(png_ptr); struct rwpng_chunk *chunk = malloc(sizeof(struct rwpng_chunk)); memcpy(chunk->name, in_chunk->name, 5); chunk->size = in_chunk->size; chunk->location = in_chunk->location; chunk->data = in_chunk->size ? 
malloc(in_chunk->size) : NULL; if (in_chunk->size) { memcpy(chunk->data, in_chunk->data, in_chunk->size); } chunk->next = *head; *head = chunk; return 1; // marks as "handled", libpng won't store it } /* retval: 0 = success 21 = bad sig 22 = bad IHDR 24 = insufficient memory 25 = libpng error (via longjmp()) 26 = wrong PNG color type (no alpha channel) */ pngquant_error rwpng_read_image24_libpng(FILE *infile, png24_image *mainprog_ptr, int verbose) { png_structp png_ptr = NULL; png_infop info_ptr = NULL; png_size_t rowbytes; int color_type, bit_depth; png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, mainprog_ptr, rwpng_error_handler, verbose ? rwpng_warning_stderr_handler : rwpng_warning_silent_handler); if (!png_ptr) { return PNG_OUT_OF_MEMORY_ERROR; /* out of memory */ } info_ptr = png_create_info_struct(png_ptr); if (!info_ptr) { png_destroy_read_struct(&png_ptr, NULL, NULL); return PNG_OUT_OF_MEMORY_ERROR; /* out of memory */ } /* setjmp() must be called in every function that calls a non-trivial * libpng function */ if (setjmp(mainprog_ptr->jmpbuf)) { png_destroy_read_struct(&png_ptr, &info_ptr, NULL); return LIBPNG_FATAL_ERROR; /* fatal libpng error (via longjmp()) */ } png_set_read_user_chunk_fn(png_ptr, &mainprog_ptr->chunks, read_chunk_callback); struct rwpng_read_data read_data = {infile, 0}; png_set_read_fn(png_ptr, &read_data, user_read_data); png_read_info(png_ptr, info_ptr); /* read all PNG info up to image data */ /* alternatively, could make separate calls to png_get_image_width(), * etc., but want bit_depth and color_type for later [don't care about * compression_type and filter_type => NULLs] */ png_get_IHDR(png_ptr, info_ptr, &mainprog_ptr->width, &mainprog_ptr->height, &bit_depth, &color_type, NULL, NULL, NULL); /* expand palette images to RGB, low-bit-depth grayscale images to 8 bits, * transparency chunks to full alpha channel; strip 16-bit-per-sample * images to 8 bits per sample; and convert grayscale to RGB[A] */ /* GRR TO DO: 
preserve all safe-to-copy ancillary PNG chunks */ if (!(color_type & PNG_COLOR_MASK_ALPHA)) { #ifdef PNG_READ_FILLER_SUPPORTED png_set_expand(png_ptr); png_set_filler(png_ptr, 65535L, PNG_FILLER_AFTER); #else fprintf(stderr, "pngquant readpng: image is neither RGBA nor GA\n"); png_destroy_read_struct(&png_ptr, &info_ptr, NULL); mainprog_ptr->retval = 26; return mainprog_ptr->retval; #endif } if (bit_depth == 16) { png_set_strip_16(png_ptr); } if (!(color_type & PNG_COLOR_MASK_COLOR)) { png_set_gray_to_rgb(png_ptr); } /* get source gamma for gamma correction, or use sRGB default */ double gamma = 0.45455; if (!png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB)) { png_get_gAMA(png_ptr, info_ptr, &gamma); } mainprog_ptr->gamma = gamma; png_set_interlace_handling(png_ptr); /* all transformations have been registered; now update info_ptr data, * get rowbytes and channels, and allocate image memory */ png_read_update_info(png_ptr, info_ptr); rowbytes = png_get_rowbytes(png_ptr, info_ptr); if ((mainprog_ptr->rgba_data = malloc(rowbytes*mainprog_ptr->height)) == NULL) { fprintf(stderr, "pngquant readpng: unable to allocate image data\n"); png_destroy_read_struct(&png_ptr, &info_ptr, NULL); return PNG_OUT_OF_MEMORY_ERROR; } png_bytepp row_pointers = rwpng_create_row_pointers(info_ptr, png_ptr, mainprog_ptr->rgba_data, mainprog_ptr->height, 0); /* now we can go ahead and just read the whole image */ png_read_image(png_ptr, row_pointers); /* and we're done! (png_read_end() can be omitted if no processing of * post-IDAT text/time/etc. 
is desired) */ png_read_end(png_ptr, NULL); #if USE_LCMS #if PNG_LIBPNG_VER < 10500 png_charp ProfileData; #else png_bytep ProfileData; #endif png_uint_32 ProfileLen; cmsHPROFILE hInProfile = NULL; /* color_type is read from the image before conversion to RGBA */ int COLOR_PNG = color_type & PNG_COLOR_MASK_COLOR; mainprog_ptr->lcms_status = NONE; /* embedded ICC profile */ if (png_get_iCCP(png_ptr, info_ptr, &(png_charp){0}, &(int){0}, &ProfileData, &ProfileLen)) { hInProfile = cmsOpenProfileFromMem(ProfileData, ProfileLen); cmsColorSpaceSignature colorspace = cmsGetColorSpace(hInProfile); /* only RGB (and GRAY) valid for PNGs */ if (colorspace == cmsSigRgbData && COLOR_PNG) { mainprog_ptr->lcms_status = ICCP; } else { if (colorspace == cmsSigGrayData && !COLOR_PNG) { mainprog_ptr->lcms_status = ICCP_WARN_GRAY; } cmsCloseProfile(hInProfile); hInProfile = NULL; } } /* build RGB profile from cHRM and gAMA */ if (hInProfile == NULL && COLOR_PNG && !png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB) && png_get_valid(png_ptr, info_ptr, PNG_INFO_gAMA) && png_get_valid(png_ptr, info_ptr, PNG_INFO_cHRM)) { cmsCIExyY WhitePoint; cmsCIExyYTRIPLE Primaries; png_get_cHRM(png_ptr, info_ptr, &WhitePoint.x, &WhitePoint.y, &Primaries.Red.x, &Primaries.Red.y, &Primaries.Green.x, &Primaries.Green.y, &Primaries.Blue.x, &Primaries.Blue.y); WhitePoint.Y = Primaries.Red.Y = Primaries.Green.Y = Primaries.Blue.Y = 1.0; cmsToneCurve *GammaTable[3]; GammaTable[0] = GammaTable[1] = GammaTable[2] = cmsBuildGamma(NULL, 1/gamma); hInProfile = cmsCreateRGBProfile(&WhitePoint, &Primaries, GammaTable); cmsFreeToneCurve(GammaTable[0]); mainprog_ptr->lcms_status = GAMA_CHRM; } /* transform image to sRGB colorspace */ if (hInProfile != NULL) { cmsHPROFILE hOutProfile = cmsCreate_sRGBProfile(); cmsHTRANSFORM hTransform = cmsCreateTransform(hInProfile, TYPE_RGBA_8, hOutProfile, TYPE_RGBA_8, INTENT_PERCEPTUAL, omp_get_max_threads() > 1 ? 
cmsFLAGS_NOCACHE : 0); #pragma omp parallel for \ if (mainprog_ptr->height*mainprog_ptr->width > 8000) \ schedule(static) for (unsigned int i = 0; i < mainprog_ptr->height; i++) { /* It is safe to use the same block for input and output, when both are of the same TYPE. */ cmsDoTransform(hTransform, row_pointers[i], row_pointers[i], mainprog_ptr->width); } cmsDeleteTransform(hTransform); cmsCloseProfile(hOutProfile); cmsCloseProfile(hInProfile); mainprog_ptr->gamma = 0.45455; } #endif png_destroy_read_struct(&png_ptr, &info_ptr, NULL); mainprog_ptr->file_size = read_data.bytes_read; mainprog_ptr->row_pointers = (unsigned char **)row_pointers; return SUCCESS; } static void rwpng_free_chunks(struct rwpng_chunk *chunk) { if (!chunk) return; rwpng_free_chunks(chunk->next); free(chunk->data); free(chunk); } void rwpng_free_image24(png24_image *image) { free(image->row_pointers); image->row_pointers = NULL; free(image->rgba_data); image->rgba_data = NULL; rwpng_free_chunks(image->chunks); image->chunks = NULL; } void rwpng_free_image8(png8_image *image) { free(image->indexed_data); image->indexed_data = NULL; free(image->row_pointers); image->row_pointers = NULL; rwpng_free_chunks(image->chunks); image->chunks = NULL; } pngquant_error rwpng_read_image24(FILE *infile, png24_image *input_image_p, int verbose) { #if USE_COCOA return rwpng_read_image24_cocoa(infile, input_image_p); #else return rwpng_read_image24_libpng(infile, input_image_p, verbose); #endif } static pngquant_error rwpng_write_image_init(rwpng_png_image *mainprog_ptr, png_structpp png_ptr_p, png_infopp info_ptr_p, int fast_compression) { /* could also replace libpng warning-handler (final NULL), but no need: */ *png_ptr_p = png_create_write_struct(PNG_LIBPNG_VER_STRING, mainprog_ptr, rwpng_error_handler, NULL); if (!(*png_ptr_p)) { return LIBPNG_INIT_ERROR; /* out of memory */ } *info_ptr_p = png_create_info_struct(*png_ptr_p); if (!(*info_ptr_p)) { png_destroy_write_struct(png_ptr_p, NULL); return 
LIBPNG_INIT_ERROR;   /* out of memory */
    }

    /* setjmp() must be called in every function that calls a PNG-writing
     * libpng function, unless an alternate error handler was installed--
     * but compatible error handlers must either use longjmp() themselves
     * (as in this program) or exit immediately, so here we go: */

    if (setjmp(mainprog_ptr->jmpbuf)) {
        png_destroy_write_struct(png_ptr_p, info_ptr_p);
        return LIBPNG_INIT_ERROR;   /* libpng error (via longjmp()) */
    }

    png_set_compression_level(*png_ptr_p, fast_compression ? Z_BEST_SPEED : Z_BEST_COMPRESSION);
    png_set_compression_mem_level(*png_ptr_p, fast_compression ? 9 : 5); // judging by optipng results, smaller mem makes libpng compress slightly better

    return SUCCESS;
}

/* Writes header + image data + trailer and destroys the write structs.
   Shared tail of both rwpng_write_image8() and rwpng_write_image24(). */
void rwpng_write_end(png_infopp info_ptr_p, png_structpp png_ptr_p, png_bytepp row_pointers)
{
    png_write_info(*png_ptr_p, *info_ptr_p);

    /* pack sub-8-bit palette samples tightly into bytes */
    png_set_packing(*png_ptr_p);

    png_write_image(*png_ptr_p, row_pointers);

    png_write_end(*png_ptr_p, NULL);

    png_destroy_write_struct(png_ptr_p, info_ptr_p);
}

/* Records gamma and sRGB intent on the output image.
   NOTE(review): sets sRGB unconditionally — presumably safe because remap
   normalizes gamma to 0.45455 beforehand; confirm against callers. */
void rwpng_set_gamma(png_infop info_ptr, png_structp png_ptr, double gamma)
{
    /* remap sets gamma to 0.45455 */
    png_set_gAMA(png_ptr, info_ptr, gamma);
    png_set_sRGB(png_ptr, info_ptr, 0); // 0 = Perceptual
}

/* Writes a palette (indexed) PNG through the size-capped custom writer;
   returns the error accumulated by user_write_data(), if any. */
pngquant_error rwpng_write_image8(FILE *outfile, const png8_image *mainprog_ptr)
{
    png_structp png_ptr;
    png_infop info_ptr;

    pngquant_error retval = rwpng_write_image_init((rwpng_png_image*)mainprog_ptr, &png_ptr, &info_ptr, mainprog_ptr->fast_compression);
    if (retval) return retval;

    struct rwpng_write_state write_state;
    write_state = (struct rwpng_write_state){
        .outfile = outfile,
        .maximum_file_size = mainprog_ptr->maximum_file_size,
        .retval = SUCCESS,
    };
    png_set_write_fn(png_ptr, &write_state, user_write_data, user_flush_data);

    // Palette images generally don't gain anything from filtering
    png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, PNG_FILTER_VALUE_NONE);

    rwpng_set_gamma(info_ptr, png_ptr, mainprog_ptr->gamma);

    /* set the image parameters appropriately */
    int
sample_depth; #if PNG_LIBPNG_VER > 10400 /* old libpng corrupts files with low depth */ if (mainprog_ptr->num_palette <= 2) sample_depth = 1; else if (mainprog_ptr->num_palette <= 4) sample_depth = 2; else if (mainprog_ptr->num_palette <= 16) sample_depth = 4; else #endif sample_depth = 8; struct rwpng_chunk *chunk = mainprog_ptr->chunks; int chunk_num=0; while(chunk) { png_unknown_chunk pngchunk = { .size = chunk->size, .data = chunk->data, .location = chunk->location, }; memcpy(pngchunk.name, chunk->name, 5); png_set_unknown_chunks(png_ptr, info_ptr, &pngchunk, 1); #if defined(PNG_HAVE_IHDR) && PNG_LIBPNG_VER < 10600 png_set_unknown_chunk_location(png_ptr, info_ptr, chunk_num, pngchunk.location ? pngchunk.location : PNG_HAVE_IHDR); #endif chunk = chunk->next; chunk_num++; } png_set_IHDR(png_ptr, info_ptr, mainprog_ptr->width, mainprog_ptr->height, sample_depth, PNG_COLOR_TYPE_PALETTE, 0, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_BASE); png_set_PLTE(png_ptr, info_ptr, &mainprog_ptr->palette[0], mainprog_ptr->num_palette); if (mainprog_ptr->num_trans > 0) { png_set_tRNS(png_ptr, info_ptr, mainprog_ptr->trans, mainprog_ptr->num_trans, NULL); } rwpng_write_end(&info_ptr, &png_ptr, mainprog_ptr->row_pointers); return write_state.retval; } pngquant_error rwpng_write_image24(FILE *outfile, const png24_image *mainprog_ptr, int filter) { png_structp png_ptr; png_infop info_ptr; pngquant_error retval = rwpng_write_image_init((rwpng_png_image*)mainprog_ptr, &png_ptr, &info_ptr, 0); if (retval) return retval; png_init_io(png_ptr, outfile); rwpng_set_gamma(info_ptr, png_ptr, mainprog_ptr->gamma); png_set_IHDR(png_ptr, info_ptr, mainprog_ptr->width, mainprog_ptr->height, 8, PNG_COLOR_TYPE_RGB_ALPHA, 0, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_BASE); png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, filter); png_bytepp row_pointers = rwpng_create_row_pointers(info_ptr, png_ptr, mainprog_ptr->rgba_data, mainprog_ptr->height, 0); rwpng_write_end(&info_ptr, &png_ptr, 
row_pointers);

    free(row_pointers);

    return SUCCESS;
}

/* verbose mode: forward libpng warnings to stderr */
static void rwpng_warning_stderr_handler(png_structp png_ptr, png_const_charp msg)
{
    fprintf(stderr, " %s\n", msg);
}

/* quiet mode: deliberately swallow libpng warnings */
static void rwpng_warning_silent_handler(png_structp png_ptr, png_const_charp msg)
{
}

/* Fatal-error handler installed on every read/write struct: prints the
   message, then longjmp()s back to the setjmp() in the calling function
   (the jmp_buf lives in the rwpng_png_image passed as the error pointer). */
static void rwpng_error_handler(png_structp png_ptr, png_const_charp msg)
{
    rwpng_png_image *mainprog_ptr;

    /* This function, aside from the extra step of retrieving the "error
     * pointer" (below) and the fact that it exists within the application
     * rather than within libpng, is essentially identical to libpng's
     * default error handler. The second point is critical: since both
     * setjmp() and longjmp() are called from the same code, they are
     * guaranteed to have compatible notions of how big a jmp_buf is,
     * regardless of whether _BSD_SOURCE or anything else has (or has not)
     * been defined. */

    fprintf(stderr, " error: %s\n", msg);
    fflush(stderr);

    /* no jmp_buf to return to => cannot recover, abort */
    mainprog_ptr = png_get_error_ptr(png_ptr);
    if (mainprog_ptr == NULL) abort();

    longjmp(mainprog_ptr->jmpbuf, 1);
}