source
stringlengths
3
92
c
stringlengths
26
2.25M
data.c
// SPDX-License-Identifier: BSD-2-Clause /* Copyright 1999-2016 Bernard Parent Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <src/data.h> #include <model/_model.h> #include <cycle/_cycle.h> #define dt_steady 1.0e99 #define DATATYPE_BINARY 1 #define DATATYPE_ASCII 2 #ifdef _3DL #define SUBZONE_DESIRED_WIDTH 40 #else #define SUBZONE_DESIRED_WIDTH 40 #endif #define MIN_NUMSUBZONE_PER_THREAD 3 void find_NODEVALID_on_domain_all(np_t *np, gl_t *gl, int TYPELEVEL, bool *NODEVALID){ long i,j,k; #ifdef DISTMPI int rank,thisrank; int THISNODEVALID; #endif for_ijk(gl->domain_lim_all,is,js,ks,ie,je,ke){ NODEVALID[_ai_all(gl,i,j,k)]=FALSE; } #ifdef DISTMPI MPI_Comm_rank(MPI_COMM_WORLD, &rank); for_ijk(gl->domain_all,is,js,ks,ie,je,ke){ if (j==gl->domain_all.js && k==gl->domain_all.ks) MPI_Barrier(MPI_COMM_WORLD); thisrank=_node_rank(gl, i, j, k); if (thisrank==rank) THISNODEVALID=(int)(is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL)); MPI_Bcast(&THISNODEVALID,1,MPI_INT,thisrank,MPI_COMM_WORLD); assert(THISNODEVALID==TRUE || THISNODEVALID==FALSE); NODEVALID[_ai_all(gl,i,j,k)]=(bool)THISNODEVALID; } MPI_Barrier(MPI_COMM_WORLD); #else for_ijk(gl->domain_all,is,js,ks,ie,je,ke){ NODEVALID[_ai_all(gl,i,j,k)]=is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL); } #endif } void read_data_file_binary_ascii(char *filename, np_t *np, gl_t *gl, long level, int DATATYPE){ FILE *datafile; char data_format_str[100]; long i,j,k,flux,cnt,tmp1,tmp2,tmp3; double CFLmem; #ifdef _RESTIME_STORAGE_TRAPEZOIDAL flux_t Res; bool NORES=FALSE; long NOREScount=0; #endif #ifndef UNSTEADY double tmp_double; #endif bool FORMAT010; bool *NODEVALID; flux_t U; #ifdef EMFIELD double Lcmem; fluxemfield_t Uemfield; #endif #ifdef DISTMPI int rank,numproc; MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &numproc); MPI_Barrier(MPI_COMM_WORLD); #endif NODEVALID=(bool *)malloc((gl->domain_lim_all.ie-gl->domain_lim_all.is+1) #ifdef _2DL *(gl->domain_lim_all.je-gl->domain_lim_all.js+1) #endif #ifdef _3DL *(gl->domain_lim_all.ke-gl->domain_lim_all.ks+1) #endif *sizeof(bool)); CFLmem=gl->CFL; #ifdef EMFIELD Lcmem=gl->Lc; #endif 
datafile = fopen(filename, "r"); if (datafile==NULL) fatal_error("Having problems opening datafile %s.",filename); for (cnt=0; cnt<16; cnt++) { if (fscanf(datafile,"%c",&(data_format_str[cnt]))!=1) fatal_error("Problem with fscanf in read_data_file_binary()."); } data_format_str[16]=EOS; wfprintf(stdout,"Reading data file %s ",filename); if (level!=0) wfprintf(stdout,"to time level minus %ld ",level); FORMAT010=FALSE; switch (DATATYPE){ case DATATYPE_BINARY: if (strcmp("WARPBINFORMAT010",data_format_str)==0) { wfprintf(stdout,"in CFDWARP binary format 010.."); FORMAT010=TRUE; } break; case DATATYPE_ASCII: if (strcmp("WARPASCFORMAT010",data_format_str)==0) { wfprintf(stdout,"in CFDWARP ASCII format 010.."); FORMAT010=TRUE; } break; } if (FORMAT010) { if (level==0) { if (fscanf(datafile," windowis=%ld windowie=%ld iter=%ld effiter_U=%lg effiter_R=%lg CFL=%lg", &(gl->window.is),&(gl->window.ie), &(gl->iter),&(gl->effiter_U),&(gl->effiter_R),&(gl->CFL))!=6) fatal_error("Problem with fscanf in read_data_file_binary()."); if (fscanf(datafile," nd=%ld ns=%ld nf=%ld", &tmp1,&tmp2, &tmp3)!=3) fatal_error("Problem with fscanf in read_data_file_binary()."); if (tmp1!=nd) fatal_error("Data file has %ld dimensions but CFDWARP is compiled with %ld dimensions.",tmp1,nd); if (tmp2!=ns) fatal_error("Data file has %ld species but CFDWARP is compiled with %ld species.",tmp2,ns); if (tmp3!=nf) fatal_error("Data file has %ld fluxes but CFDWARP is compiled with %ld fluxes.",tmp3,nf); if (fscanf(datafile," is=%ld ie=%ld", &tmp1,&tmp2)!=2) fatal_error("Problem with fscanf in read_data_file_binary()."); if ((tmp2-tmp1)!=(gl->domain_all.ie-gl->domain_all.is)) fatal_error("Data file has %ld grid lines along i but the control file specifies %ld grid lines.",tmp2-tmp1,(gl->domain_all.ie-gl->domain_all.is)); #ifdef _2DL if (fscanf(datafile," js=%ld je=%ld", &tmp1,&tmp2)!=2) fatal_error("Problem with fscanf in read_data_file_binary()."); if ((tmp2-tmp1)!=(gl->domain_all.je-gl->domain_all.js)) 
fatal_error("Data file has %ld grid lines along j but the control file specifies %ld grid lines.",tmp2-tmp1,(gl->domain_all.je-gl->domain_all.js)); #endif #ifdef _3DL if (fscanf(datafile," ks=%ld ke=%ld", &tmp1,&tmp2)!=2) fatal_error("Problem with fscanf in read_data_file_binary()."); if ((tmp2-tmp1)!=(gl->domain_all.ke-gl->domain_all.js)) fatal_error("Data file has %ld grid lines along k but the control file specifies %ld grid lines.",tmp2-tmp1,(gl->domain_all.ke-gl->domain_all.ks)); #endif #if defined(UNSTEADY) if (fscanf(datafile," time=%lg",&(gl->time))!=1) fatal_error("Problem reading time variable within fscanf in read_data_file_binary()."); #else if (fscanf(datafile," time=%lg",&tmp_double)!=1) fatal_error("Problem reading time variable within fscanf in read_data_file_binary()."); #endif #ifdef UNSTEADY if (fscanf(datafile," dt=%lg",&(gl->dt))!=1) fatal_error("Problem reading dt variable within fscanf in read_data_file_binary()."); #else if (fscanf(datafile," dt=%lg",&tmp_double)!=1) fatal_error("Problem reading dt variable within fscanf in read_data_file_binary()."); #endif #ifdef EMFIELD if (fscanf(datafile," Lc=%lg effiter_U_emfield=%lg effiter_R_emfield=%lg",&(gl->Lc),&(gl->effiter_U_emfield),&(gl->effiter_R_emfield))!=3) fatal_error("Problem reading EMFIELD variables within fscanf in read_data_file_binary()."); #endif if (fscanf(datafile,"%*[^\n]")!=0) fatal_error("Problem with fscanf in read_data_file_binary()."); } else { if (fscanf(datafile," %*[^\n]")!=0) fatal_error("Problem with fscanf in read_data_file_binary()."); } fgetc(datafile); } if (!FORMAT010){ fatal_error("Data file format invalid."); } wfprintf(stdout,"fluid."); find_NODEVALID_on_domain_all(np, gl, TYPELEVEL_FLUID, NODEVALID); wfprintf(stdout,"."); for_ijk(gl->domain_all,is,js,ks,ie,je,ke){ #ifdef DISTMPI if (rank==0) { #endif if (NODEVALID[_ai_all(gl,i,j,k)]) { switch (DATATYPE){ case DATATYPE_BINARY: if (fread(U, sizeof(flux_t), 1, datafile)!=1) fatal_error("Could not read all data 
properly."); break; case DATATYPE_ASCII: for (flux=0; flux<nf; flux++){ if (fscanf(datafile,"%lg%*[^\n]",&(U[flux]))!=1) fatal_error("Could not read all data properly."); } break; default: fatal_error("DATATYPE must be either DATATYPE_ASCII or DATATYPE_BINARY."); } } #ifdef DISTMPI } MPI_Bcast_Node(&U, nf, MPI_DOUBLE, 0, MPI_COMM_WORLD, i, j, k, gl); if (j==gl->domain_all.js && k==gl->domain_all.ks) MPI_Barrier(MPI_COMM_WORLD); #endif if (is_node_in_zone(i,j,k,gl->domain_lim)) { for (flux=0; flux<nf; flux++){ if (level==0) np[_ai(gl,i,j,k)].bs->U[flux]=U[flux]; #ifdef UNSTEADY if (level==1) np[_ai(gl,i,j,k)].bs->Um1[flux]=U[flux]; #if _RESTIME_BW > 2 if (level==2) np[_ai(gl,i,j,k)].bs->Um2[flux]=U[flux]; #endif #if _RESTIME_BW > 3 if (level==3) np[_ai(gl,i,j,k)].bs->Um3[flux]=U[flux]; #endif #endif } np[_ai(gl,i,j,k)].INIT_FLUID=TRUE; } } #ifdef EMFIELD wfprintf(stdout,"emfield."); find_NODEVALID_on_domain_all(np, gl, TYPELEVEL_EMFIELD, NODEVALID); wfprintf(stdout,"."); for_ijk(gl->domain_all,is,js,ks,ie,je,ke){ #ifdef DISTMPI if (rank==0) { #endif if (NODEVALID[_ai_all(gl,i,j,k)]) { switch (DATATYPE){ case DATATYPE_BINARY: if (fread(Uemfield, sizeof(fluxemfield_t), 1, datafile)!=1) fatal_error("Could not read all data properly."); break; case DATATYPE_ASCII: for (flux=0; flux<nfe; flux++){ if (fscanf(datafile,"%lg%*[^\n]",&(Uemfield[flux]))!=1) fatal_error("Could not read all data properly."); } break; default: fatal_error("DATATYPE must be either DATATYPE_ASCII or DATATYPE_BINARY."); } } #ifdef DISTMPI } MPI_Bcast_Node(&Uemfield, nfe, MPI_DOUBLE, 0, MPI_COMM_WORLD, i, j, k, gl); if (j==gl->domain_all.js && k==gl->domain_all.ks) MPI_Barrier(MPI_COMM_WORLD); #endif if (is_node_in_zone(i,j,k,gl->domain_lim)) { for (flux=0; flux<nfe; flux++) { if (level==0) np[_ai(gl,i,j,k)].bs->Uemfield[flux]=Uemfield[flux]; #ifdef UNSTEADY if (level==1) np[_ai(gl,i,j,k)].bs->Uemfieldm1[flux]=Uemfield[flux]; #endif } np[_ai(gl,i,j,k)].INIT_EMFIELD=TRUE; } } #endif #ifdef 
_RESTIME_STORAGE_TRAPEZOIDAL wfprintf(stdout,"trap."); find_NODEVALID_on_domain_all(np, gl, TYPELEVEL_FLUID, NODEVALID); wfprintf(stdout,"."); for_ijk(gl->domain_all,is,js,ks,ie,je,ke){ #ifdef DISTMPI if (rank==0) { #endif if (NODEVALID[_ai_all(gl,i,j,k)]) { switch (DATATYPE){ case DATATYPE_BINARY: if (fread(Res, sizeof(flux_t), 1, datafile)!=1){ NORES=TRUE; NOREScount++; } break; case DATATYPE_ASCII: for (flux=0; flux<nf; flux++){ if (fscanf(datafile,"%lg%*[^\n]",&(Res[flux]))!=1){ NORES=TRUE; NOREScount++; } } break; default: fatal_error("DATATYPE must be either DATATYPE_ASCII or DATATYPE_BINARY."); } } #ifdef DISTMPI } MPI_Bcast(&NORES, 1, MPI_C_BOOL, 0, MPI_COMM_WORLD); if (!NORES) MPI_Bcast_Node(&Res, nf, MPI_DOUBLE, 0, MPI_COMM_WORLD, i, j, k, gl); if (j==gl->domain_all.js && k==gl->domain_all.ks) MPI_Barrier(MPI_COMM_WORLD); #endif if (is_node_in_zone(i,j,k,gl->domain_lim)) { for (flux=0; flux<nf; flux++){ if (!NORES) np[_ai(gl,i,j,k)].bs->trapezoidalm1[flux]=Res[flux]; else if (NORES) np[_ai(gl,i,j,k)].bs->trapezoidalm1[flux]=0.0; } } NORES=FALSE; } if(NOREScount>0) wfprintf(stdout,"WARNING: The residual at the previous time step could not be found within the data file %s. 
The residual has been set to zero..",filename); #endif fclose(datafile); wfprintf(stdout,"done;\n"); if (level!=0) gl->CFL=CFLmem; #ifdef EMFIELD if (level!=0) gl->Lc=Lcmem; #endif #ifdef DISTMPI MPI_Barrier(MPI_COMM_WORLD); if (rank!=0) { gl->effiter_U=0.0; gl->effiter_R=0.0; #ifdef EMFIELD gl->effiter_U_emfield=0.0; gl->effiter_R_emfield=0.0; #endif } #endif free(NODEVALID); } void write_data_file_binary_ascii(char *filename, np_t *np, gl_t *gl, int DATATYPE){ FILE *datafile; long i,j,k; flux_t *fluxtmp; bool *NODEVALID; #ifdef EMFIELD double effiter_U_emfield,effiter_R_emfield; #endif long flux; double effiter_U,effiter_R; #ifdef DISTMPI zone_t domain; flux_t U; int rank,proc,numproc; MPI_Status MPI_Status1; #endif #ifdef EMFIELD assert(nf>=nfe); #endif fluxtmp=(flux_t *)malloc((gl->domain_lim_all.ie-gl->domain_lim_all.is+1) #ifdef _2DL *(gl->domain_lim_all.je-gl->domain_lim_all.js+1) #endif #ifdef _3DL *(gl->domain_lim_all.ke-gl->domain_lim_all.ks+1) #endif *sizeof(flux_t)); NODEVALID=(bool *)malloc((gl->domain_lim_all.ie-gl->domain_lim_all.is+1) #ifdef _2DL *(gl->domain_lim_all.je-gl->domain_lim_all.js+1) #endif #ifdef _3DL *(gl->domain_lim_all.ke-gl->domain_lim_all.ks+1) #endif *sizeof(bool)); effiter_U=gl->effiter_U; effiter_R=gl->effiter_R; #ifdef EMFIELD effiter_U_emfield=gl->effiter_U_emfield; effiter_R_emfield=gl->effiter_R_emfield; #endif #ifdef DISTMPI MPI_Barrier(MPI_COMM_WORLD); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &numproc); MPI_Allreduce(&gl->effiter_U, &effiter_U, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(&gl->effiter_R, &effiter_R, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); #ifdef EMFIELD MPI_Allreduce(&gl->effiter_U_emfield, &effiter_U_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(&gl->effiter_R_emfield, &effiter_R_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); #endif #endif datafile = wfopen(filename, "w"); wfprintf(stdout,"Writing to CFDWARP "); switch (DATATYPE){ case 
DATATYPE_BINARY:
      wfprintf(stdout,"binary");
      wfprintf(datafile,"WARPBINFORMAT010");
    break;
    case DATATYPE_ASCII:
      wfprintf(stdout,"ASCII");
      wfprintf(datafile,"WARPASCFORMAT010");
    break;
    default:
      fatal_error("DATATYPE must be either DATATYPE_BINARY or DATATYPE_ASCII.");
  }
  wfprintf(stdout," data file %s..",filename);
  /* header line: window, iteration counters, CFL, grid extents, time info */
  wfprintf(datafile," windowis=%ld windowie=%ld iter=%ld effiter_U=%E effiter_R=%E CFL=%E",
           gl->window.is,gl->window.ie,gl->iter,effiter_U,effiter_R,gl->CFL);
  wfprintf(datafile," nd=%ld ns=%ld nf=%ld",nd,ns,nf);
  wfprintf(datafile," is=%ld ie=%ld",gl->domain_all.is,gl->domain_all.ie);
#ifdef _2DL
  wfprintf(datafile," js=%ld je=%ld",gl->domain_all.js,gl->domain_all.je);
#endif
#ifdef _3DL
  wfprintf(datafile," ks=%ld ke=%ld",gl->domain_all.ks,gl->domain_all.ke);
#endif
#if defined(UNSTEADY)
  wfprintf(datafile," time=%E",gl->time);
#else
  /* steady-state build: placeholders keep the header format uniform */
  wfprintf(datafile," time=%E",0.0);
#endif
#ifdef UNSTEADY
  wfprintf(datafile," dt=%E",gl->dt);
#else
  wfprintf(datafile," dt=%E",dt_steady);
#endif
#ifdef EMFIELD
  wfprintf(datafile," Lc=%E effiter_U_emfield=%E effiter_R_emfield=%E",gl->Lc,effiter_U_emfield,effiter_R_emfield);
#endif
  wfprintf(datafile,"\n");

  /* ---- fluid variables: stage U for the whole domain into fluxtmp ---- */
  find_NODEVALID_on_domain_all(np, gl, TYPELEVEL_FLUID, NODEVALID);
#ifdef DISTMPI
  /* gather each rank's sub-domain onto rank 0, one rank at a time */
  for (proc=0; proc<numproc; proc++){
    domain=_domain_from_rank(proc,gl);
    for_ijk(domain,is,js,ks,ie,je,ke){
      if (proc==rank){
        for (flux=0; flux<nf; flux++) U[flux]=np[_ai(gl,i,j,k)].bs->U[flux];
        if (proc!=0) {
          MPI_Send(U,nf,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
        }
      }
      if (rank==0 && proc!=0) {
        MPI_Recv(U,nf,MPI_DOUBLE,proc,0,MPI_COMM_WORLD,&MPI_Status1);
      }
      /* note: only meaningful on rank 0 (and on proc, where U is local) */
      for (flux=0; flux<nf; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=U[flux];
    }
    MPI_Barrier(MPI_COMM_WORLD);
  }
#else
  for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
    for (flux=0; flux<nf; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=np[_ai(gl,i,j,k)].bs->U[flux];
  }
#endif
  /* write only the valid nodes, in domain_all order */
  for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
    if (NODEVALID[_ai_all(gl,i,j,k)]) {
      switch (DATATYPE){
        case DATATYPE_BINARY:
          wfwrite(fluxtmp[_ai_all(gl,i,j,k)], sizeof(flux_t), 1, datafile);
        break;
        case DATATYPE_ASCII:
          for (flux=0; flux<nf; flux++) wfprintf(datafile, "%18.16E\n",fluxtmp[_ai_all(gl,i,j,k)][flux]);
        break;
        default:
          fatal_error("DATATYPE must be either DATATYPE_ASCII or DATATYPE_BINARY.");
      }
    }
  }

#ifdef EMFIELD
  /* ---- emfield variables: same gather-then-write scheme with nfe fluxes ---- */
  find_NODEVALID_on_domain_all(np, gl, TYPELEVEL_EMFIELD, NODEVALID);
#ifdef DISTMPI
  for (proc=0; proc<numproc; proc++){
    domain=_domain_from_rank(proc,gl);
    for_ijk(domain,is,js,ks,ie,je,ke){
      if (proc==rank){
        for (flux=0; flux<nfe; flux++) U[flux]=np[_ai(gl,i,j,k)].bs->Uemfield[flux];
        if (proc!=0) {
          MPI_Send(U,nfe,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
        }
      }
      if (rank==0 && proc!=0) {
        MPI_Recv(U,nfe,MPI_DOUBLE,proc,0,MPI_COMM_WORLD,&MPI_Status1);
      }
      for (flux=0; flux<nfe; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=U[flux];
    }
    MPI_Barrier(MPI_COMM_WORLD);
  }
#else
  for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
    for (flux=0; flux<nfe; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=np[_ai(gl,i,j,k)].bs->Uemfield[flux];
  }
#endif
  for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
    if (NODEVALID[_ai_all(gl,i,j,k)]) {
      switch (DATATYPE){
        case DATATYPE_BINARY:
          /* only the first nfe doubles of the staged flux_t are written */
          wfwrite(fluxtmp[_ai_all(gl,i,j,k)], sizeof(fluxemfield_t), 1, datafile);
        break;
        case DATATYPE_ASCII:
          for (flux=0; flux<nfe; flux++) wfprintf(datafile, "%18.16E\n",fluxtmp[_ai_all(gl,i,j,k)][flux]);
        break;
        default:
          fatal_error("DATATYPE must be either DATATYPE_ASCII or DATATYPE_BINARY.");
      }
    }
  }
#endif

#ifdef _RESTIME_STORAGE_TRAPEZOIDAL
  /* ---- trapezoidal residual at the previous time step, appended last ---- */
  find_NODEVALID_on_domain_all(np, gl, TYPELEVEL_FLUID, NODEVALID);
#ifdef DISTMPI
  for (proc=0; proc<numproc; proc++){
    domain=_domain_from_rank(proc,gl);
    for_ijk(domain,is,js,ks,ie,je,ke){
      if (proc==rank){
        for (flux=0; flux<nf; flux++) U[flux]=np[_ai(gl,i,j,k)].bs->trapezoidalm1[flux];
        if (proc!=0) {
          MPI_Send(U,nf,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
        }
      }
      if (rank==0 && proc!=0) {
        MPI_Recv(U,nf,MPI_DOUBLE,proc,0,MPI_COMM_WORLD,&MPI_Status1);
      }
      for (flux=0; flux<nf; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=U[flux];
    }
    MPI_Barrier(MPI_COMM_WORLD);
  }
#else
  for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
    for (flux=0; flux<nf; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=np[_ai(gl,i,j,k)].bs->trapezoidalm1[flux];
  }
#endif
  for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
    if (NODEVALID[_ai_all(gl,i,j,k)]) {
      switch (DATATYPE){
        case DATATYPE_BINARY:
          wfwrite(fluxtmp[_ai_all(gl,i,j,k)], sizeof(flux_t), 1, datafile);
        break;
        case DATATYPE_ASCII:
          for (flux=0; flux<nf; flux++) wfprintf(datafile, "%18.16E\n",fluxtmp[_ai_all(gl,i,j,k)][flux]);
        break;
        default:
          fatal_error("DATATYPE must be either DATATYPE_ASCII or DATATYPE_BINARY.");
      }
    }
  }
#endif
#ifdef DISTMPI
  MPI_Barrier(MPI_COMM_WORLD);
#endif
  wfclose(datafile);
  wfprintf(stdout,"done.\n");
  free(fluxtmp);
  free(NODEVALID);
}


/* Write the current solution to gl->output_filename, first rotating the
   existing output file to .wbak and the previous .wbak to .wbak2 (rank 0
   only under DISTMPI). Dispatches to interpolation, ASCII or binary output
   according to the control-file flags. */
void write_data_file(np_t *np, gl_t *gl){
  char *tmp_filename,*tmp_filename2;
#ifdef DISTMPI
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
  tmp_filename=(char *)malloc((strlen(gl->output_filename)+10)*sizeof(char));
  tmp_filename2=(char *)malloc((strlen(gl->output_filename)+10)*sizeof(char));
  strcpy(tmp_filename,gl->output_filename);
  SOAP_strins(".wbak",&tmp_filename,strlen(tmp_filename));
  strcpy(tmp_filename2,gl->output_filename);
  SOAP_strins(".wbak2",&tmp_filename2,strlen(tmp_filename2));
#ifdef DISTMPI
  if (rank==0) {
#endif
    /* rotate backups: file.wbak -> file.wbak2, file -> file.wbak */
    rename(tmp_filename,tmp_filename2);
    rename(gl->output_filename,tmp_filename);
#ifdef DISTMPI
  }
#endif
  if (gl->OUTPUTASCII) {
    write_data_file_binary_ascii(gl->output_filename, np, gl, DATATYPE_ASCII);
  } else {
    if (gl->OUTPUTINTERPOLATION){
      write_data_file_interpolation(gl->output_filename, np, gl);
    } else {
      write_data_file_binary_ascii(gl->output_filename, np, gl, DATATYPE_BINARY);
    }
  }
  free(tmp_filename);
  free(tmp_filename2);
}


/* Read the initial solution as directed by `input` (interpolation, ASCII or
   binary; optionally older time levels m1/m2/m3 for multi-step time
   stepping), then seed the older time levels from the freshly read data. */
void read_data_file(input_t input, np_t *np, gl_t *gl){
#ifdef UNSTEADY
  long i,j,k;
  long flux;
#endif
  long spec;
  gl->nsinit=ns;
  for (spec=0; spec<ns; spec++) gl->initspecies[spec]=spec;
  if (input.READDATAFILE) {
    if (input.ASCII) {
      read_data_file_binary_ascii(input.name, np, gl, 0, DATATYPE_ASCII);
    } else {
      if
(input.INTERPOLATION){
        read_data_file_interpolation(input.name, np, gl);
      } else {
        read_data_file_binary_ascii(input.name, np, gl, 0, DATATYPE_BINARY);
      }
    }
    gl->iter=max(gl->iter,1);
    gl->INIT_FLUID_READ=TRUE;
    gl->INIT_EMFIELD_READ=TRUE;
  }
#ifdef UNSTEADY
  /* default the previous time level(s) to the current solution, then
     overwrite them from the m1/m2/m3 files when provided */
  for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
    for (flux=0; flux<nf; flux++){
      (np)[_ai(gl,i,j,k)].bs->Um1[flux]=(np)[_ai(gl,i,j,k)].bs->U[flux];
    }
#ifdef EMFIELD
    for (flux=0; flux<nfe; flux++){
      (np)[_ai(gl,i,j,k)].bs->Uemfieldm1[flux]=(np)[_ai(gl,i,j,k)].bs->Uemfield[flux];
    }
#endif
  }
  if (input.M1) read_data_file_binary_ascii(input.name_m1, np, gl, 1, DATATYPE_BINARY);
#if _RESTIME_BW > 2
  for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
    for (flux=0; flux<nf; flux++){
      (np)[_ai(gl,i,j,k)].bs->Um2[flux]=(np)[_ai(gl,i,j,k)].bs->Um1[flux];
    }
  }
  if (input.M2) read_data_file_binary_ascii(input.name_m2, np, gl, 2, DATATYPE_BINARY);
#endif
#if _RESTIME_BW > 3
  for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
    for (flux=0; flux<nf; flux++){
      (np)[_ai(gl,i,j,k)].bs->Um3[flux]=(np)[_ai(gl,i,j,k)].bs->Um2[flux];
    }
  }
  if (input.M3) read_data_file_binary_ascii(input.name_m3, np, gl, 3, DATATYPE_BINARY);
#endif
#endif
}


/* Compute the interpolation weight of file data point x_file at node l.
   The local cell basis (dx1..dx3) is inverted to express the node-to-point
   offset in cell-relative coordinates; the weight decreases with the cubic
   norm of that offset and is clipped to at least 1e-16. Nodes farther than
   sqrt(radiusmax2) get weight 0. */
void find_interpolation_weight(np_t *np, gl_t *gl, long l, dim_t x_file, dim_t dx1_file,
#ifdef _2DL
                 dim_t dx2_file,
#endif
#ifdef _3DL
                 dim_t dx3_file,
#endif
                 double radiusmax2, double *thisweight){
  double distance;
  EXM_mat_t mat1,mat2,mat3,mat1inv;
  long dim;
  distance=0.0;
  for (dim=0; dim<nd; dim++) distance+=sqr(_x(np[l],dim)-x_file[dim]);
  *thisweight=0.0;
  if (distance<radiusmax2) {
    /* mat1 columns are the data point's local grid spacing vectors */
    EXM_init_matrix(&mat1, nd, nd);
    for (dim=0; dim<nd; dim++){
      mat1.cont[EXM_aim(mat1.glm,dim,0)]=dx1_file[dim];
#ifdef _2DL
      mat1.cont[EXM_aim(mat1.glm,dim,1)]=dx2_file[dim];
#endif
#ifdef _3DL
      mat1.cont[EXM_aim(mat1.glm,dim,2)]=dx3_file[dim];
#endif
    }
    EXM_init_matrix(&mat1inv, nd, nd);
    EXM_invert_matrix_analytical(mat1, &mat1inv);
    /* mat2 is the physical offset from the data point to node l */
    EXM_init_matrix(&mat2, nd, 1);
    for (dim=0; dim<nd; dim++){
      mat2.cont[EXM_aim(mat2.glm,dim,0)]=_x(np[l],dim)-x_file[dim];
    }
    /* mat3 = mat1^-1 * mat2: the offset in cell-relative coordinates */
    EXM_init_matrix(&mat3, nd, 1);
    EXM_multiply_matrices(mat1inv, mat2, &mat3);
    *thisweight=0.0;
    //for (dim=0; dim<nd; dim++) thisweight=max(thisweight,fabs(mat3.cont[EXM_aim(mat3.glm,dim,0)]));
    for (dim=0; dim<nd; dim++) *thisweight+=fabs(pow(fabs(mat3.cont[EXM_aim(mat3.glm,dim,0)]),3.0));
    *thisweight=fabs(pow(*thisweight,1.0/3.0));
    /* map the cubic-norm distance to a weight, never below 1e-16 */
    *thisweight=max(1e-16,max(0.0003-(*thisweight)*0.00001,1.0-(*thisweight)));
    EXM_free_matrix(&mat1);
    EXM_free_matrix(&mat1inv);
    EXM_free_matrix(&mat2);
    EXM_free_matrix(&mat3);
  }
}


/* Scan `zone` for the first valid node (at TYPELEVEL) lying within
   sqrt(radiusmax2) of x_file; on success returns TRUE with (*i,*j,*k) set
   to that node. NOTE(review): in 2D builds *k is never assigned here and
   is passed through to _ai — presumably _ai ignores k in 2D; confirm. */
bool is_interpolation_occurring_in_zone(np_t *np, gl_t *gl, int TYPELEVEL, dim_t x_file, double radiusmax2, zone_t zone, long *i, long *j, long *k){
  long dim;
  double distance;
  bool FOUND;
  FOUND=FALSE;
  // fprintf(stderr,"zone.is=%ld zone.ie=%ld\n",zone.is,zone.ie);
  //if (zone.ie>gl->domain_all.ie) fatal_error("problem here..");
  //if (zone.is<gl->domain_all.is) fatal_error("problem here..");
  *i=zone.is-1;
  do {
    (*i)++;
    *j=zone.js-1;
    do {
      (*j)++;
#ifdef _3DL
      *k=zone.ks-1;
      do {
        (*k)++;
#endif
        if (is_node_valid(np[_ai(gl,*i,*j,*k)],TYPELEVEL)){
          distance=0.0;
          for (dim=0; dim<nd; dim++) distance+=sqr(_x(np[_ai(gl,*i,*j,*k)],dim)-x_file[dim]);
          if (distance<radiusmax2) {
            //if (zone.is==zone.ie) fprintf(stderr,"[[%E %E %ld,%ld,%ld %E %E]]\n",distance,radiusmax2,*i,*j,*k,_x(np[_ai(gl,*i,*j,*k)],0),x_file[0]);
            FOUND=TRUE;
          }
        }
#ifdef _3DL
      } while(!FOUND && *k<zone.ke);
#endif
    } while(!FOUND && *j<zone.je);
  } while(!FOUND && *i<zone.ie);
  return(FOUND);
}


/* Shrink/grow `zone` along i to the contiguous range of i-stations where
   x_file can contribute an interpolation weight. First looks inside the
   given zone, then probes outward (one i-station at a time on both sides),
   then extends the found station left and right while interpolation keeps
   occurring. Returns FALSE when no station is affected. */
bool find_interpolation_zone(np_t *np, gl_t *gl, int TYPELEVEL, dim_t x_file, double radiusmax2, zone_t *zone){
  long i,j,k,offset,imem;
  bool FOUNDWITHIN,FOUNDLEFT,FOUNDRIGHT,FOUND;
  zone_t istationzone,domain_eff;
  domain_eff=_zone_intersection(gl->domain_all,gl->domain_lim);
  FOUNDWITHIN=FALSE;
  FOUNDLEFT=FALSE;
  FOUNDRIGHT=FALSE;
  istationzone=domain_eff;
  if (is_interpolation_occurring_in_zone(np,gl,TYPELEVEL,x_file,radiusmax2,*zone,&i,&j,&k)){
    istationzone.is=i;
    istationzone.ie=i;
    FOUNDWITHIN=TRUE;
  } else {
    /* probe i-stations outward from both ends of the zone */
    offset=0;
    do {
      offset++;
      if (zone->ie+offset<=domain_eff.ie) {
        istationzone.ie=zone->ie+offset;
        istationzone.is=zone->ie+offset;
        if (is_interpolation_occurring_in_zone(np,gl,TYPELEVEL,x_file,radiusmax2,istationzone,&i,&j,&k)){
          FOUNDLEFT=TRUE;
        }
      }
      if (zone->is-offset>=domain_eff.is && !FOUNDLEFT) {
        istationzone.ie=zone->is-offset;
        istationzone.is=zone->is-offset;
        if (is_interpolation_occurring_in_zone(np,gl,TYPELEVEL,x_file,radiusmax2,istationzone,&i,&j,&k)){
          FOUNDRIGHT=TRUE;
        }
      }
    } while (!FOUNDLEFT && !FOUNDRIGHT && offset<=(domain_eff.ie-domain_eff.is+1) );
  }
  if (FOUNDRIGHT || FOUNDLEFT || FOUNDWITHIN){
    imem=istationzone.is;
    if (FOUNDRIGHT){
      zone->ie=imem;
    } else {
      /* extend rightwards while interpolation still occurs */
      FOUND=TRUE;
      while (istationzone.ie<domain_eff.ie && FOUND) {
        istationzone.ie++;
        istationzone.is++;
        FOUND=is_interpolation_occurring_in_zone(np,gl,TYPELEVEL,x_file,radiusmax2,istationzone,&i,&j,&k);
      }
      if (!FOUND) zone->ie=istationzone.ie-1;
        else zone->ie=domain_eff.ie;
    }
    if (FOUNDLEFT){
      zone->is=imem;
    } else {
      /* extend leftwards while interpolation still occurs */
      istationzone.is=imem;
      istationzone.ie=imem;
      FOUND=TRUE;
      while (istationzone.is>domain_eff.is && FOUND) {
        istationzone.is--;
        istationzone.ie--;
        FOUND=is_interpolation_occurring_in_zone(np,gl,TYPELEVEL,x_file,radiusmax2,istationzone,&i,&j,&k);
      }
      if (!FOUND) zone->is=istationzone.is+1;
        else zone->is=domain_eff.is;
    }
  }
  return(FOUNDRIGHT || FOUNDLEFT || FOUNDWITHIN);
}


/* TRUE when the file data point x_file lies inside the bounding box
   [xmin,xmax] or within sqrt(radiusmax2) of it along every dimension. */
bool is_data_point_in_domain(dim_t x_file, dim_t xmin, dim_t xmax, double radiusmax2){
  bool INDOMAIN;
  long dim;
  INDOMAIN=TRUE;
  for (dim=0; dim<nd; dim++) {
    if (INDOMAIN) {
      if (x_file[dim]<xmin[dim] && sqr(x_file[dim]-xmin[dim])>radiusmax2) INDOMAIN=FALSE;
    }
  }
  for (dim=0; dim<nd; dim++) {
    if (INDOMAIN) {
      if (x_file[dim]>xmax[dim] && sqr(x_file[dim]-xmax[dim])>radiusmax2) INDOMAIN=FALSE;
    }
  }
  return(INDOMAIN);
}


/* Read an interpolation data file (WARPINTFORMAT001) and initialize the
   fluid (and, with EMFIELD, the emfield) node properties by weighted
   interpolation of the file's scattered data points onto the local grid. */
void read_data_file_interpolation(char *filename, np_t *np, gl_t *gl){
  FILE *datafile;
  char data_format_str[100];
  long i,j,k,l_file,cnt,dim,cntzone;
  long numsubzone, numflux_read,numspec_read,numdim_read,numnodes;
  double tmp_dt,tmp_time;
  double
*weight,*radiusmax2_file,thisweight; zone_t *subzone; dim_t *xmin,*xmax; initvar_t *initvar; initvar_t *initvar_file; zone_t zone; long numsubzone_desired; #ifdef EMFIELD initvar_emfield_t *initvar_emfield; initvar_emfield_t *initvar_emfield_file; #endif bool FORMAT001; dim_t *dx1_file,*x_file; #ifdef _2DL dim_t *dx2_file; #endif #ifdef _3DL dim_t *dx3_file; #endif int cnterror; #ifdef OPENMPTHREADS omp_lock_t *nodelock; #endif #ifdef DISTMPI int rank,numproc; MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &numproc); MPI_Barrier(MPI_COMM_WORLD); #endif weight=(double *)malloc(sizeof(double)*(gl->domain_lim.ie+4) #ifdef _2DL *(gl->domain_lim.je+4) #endif #ifdef _3DL *(gl->domain_lim.ke+4) #endif ); #ifdef OPENMPTHREADS nodelock=(omp_lock_t *)malloc(sizeof(double)*(gl->domain_lim.ie+4) #ifdef _2DL *(gl->domain_lim.je+4) #endif #ifdef _3DL *(gl->domain_lim.ke+4) #endif ); #endif datafile = fopen(filename, "r"); if (datafile==NULL) fatal_error("Having problems opening interpolation datafile %s.",filename); /* first do the fluid properties */ initvar=(initvar_t *)malloc(sizeof(initvar_t)*(gl->domain_lim.ie+4) #ifdef _2DL *(gl->domain_lim.je+4) #endif #ifdef _3DL *(gl->domain_lim.ke+4) #endif ); for (cnt=0; cnt<16; cnt++){ if (fscanf(datafile,"%c",&(data_format_str[cnt]))!=1) { fatal_error("Problem with fscanf in read_data_file_interpolation()."); } } data_format_str[16]=EOS; wfprintf(stdout,"Reading interpolation data file %s ",filename); FORMAT001=FALSE; if (strcmp("WARPINTFORMAT001",data_format_str)==0) { wfprintf(stdout,"in CFDWARP format 001.."); FORMAT001=TRUE; } if (FORMAT001) { if (fscanf(datafile," numnodes=%ld nf=%ld nd=%ld ns=%ld windowis=%ld windowie=%ld iter=%ld effiter_U=%lg effiter_R=%lg CFL=%lg time=%lg dt=%lg%*[^\n]", &numnodes,&numflux_read,&numdim_read,&numspec_read,&(gl->window.is),&(gl->window.ie), &(gl->iter),&(gl->effiter_U),&(gl->effiter_R),&(gl->CFL),&(tmp_time),&tmp_dt)!=12) fatal_error("Problem reading interpolation data 
file."); #ifdef UNSTEADY gl->time=tmp_time; gl->dt=tmp_dt; #endif fgetc(datafile); if (numdim_read!=nd) fatal_error("Number of dimensions read (%ld) does not equal current number of dimensions (%ld).",numdim_read,nd); if (numspec_read!=ns) fatal_error("Number of species read (%ld) does not equal current number of species (%ld).",numspec_read,ns); if (numflux_read!=nf) fatal_error("Number of fluxes read (%ld) does not equal current number of fluxes (%ld).",numflux_read,nf); } else { fatal_error("Interpolation file format unknown."); } /* read data and store in ram */ initvar_file=(initvar_t *)malloc(numnodes*sizeof(initvar_t)); x_file=(dim_t *)malloc(numnodes*sizeof(dim_t)); dx1_file=(dim_t *)malloc(numnodes*sizeof(dim_t)); #ifdef _2DL dx2_file=(dim_t *)malloc(numnodes*sizeof(dim_t)); #endif #ifdef _3DL dx3_file=(dim_t *)malloc(numnodes*sizeof(dim_t)); #endif radiusmax2_file=(double *)malloc(numnodes*sizeof(double)); for (l_file=0; l_file<numnodes; l_file++){ cnterror=0; if (fread(initvar_file[l_file], sizeof(initvar_t), 1, datafile)!=1) cnterror++; if (fread(x_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++; if (fread(dx1_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++; #ifdef _2DL if (fread(dx2_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++; #endif #ifdef _3DL if (fread(dx3_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++; #endif if (cnterror>0) fatal_error("Could not read all data properly."); radiusmax2_file[l_file]=0.0e0; for (dim=0; dim<nd; dim++) radiusmax2_file[l_file]+=sqr(fabs(dx1_file[l_file][dim]) #ifdef _2DL +fabs(dx2_file[l_file][dim]) #endif #ifdef _3DL +fabs(dx3_file[l_file][dim]) #endif ); radiusmax2_file[l_file]*=1.1; } for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ weight[_ai(gl,i,j,k)]=0.0e0; #ifdef OPENMPTHREADS omp_init_lock(&(nodelock[_ai(gl,i,j,k)])); #endif for (cnt=0; cnt<numinitvar; cnt++) (initvar[_ai(gl,i,j,k)])[cnt]=0.0; } zone=_zone_intersection(gl->domain_all,gl->domain_lim); subzone=(zone_t 
*)malloc(sizeof(zone_t)); find_subzones_in_zone_given_zonelength(SUBZONE_DESIRED_WIDTH, zone, &numsubzone, &subzone); #ifdef OPENMPTHREADS numsubzone_desired=MIN_NUMSUBZONE_PER_THREAD*omp_get_max_threads(); #else numsubzone_desired=MIN_NUMSUBZONE_PER_THREAD; #endif if (numsubzone<numsubzone_desired) find_subzones_in_zone_given_numsubzone(zone, numsubzone_desired, &numsubzone, &subzone); xmin=(dim_t *)malloc(numsubzone*sizeof(dim_t)); xmax=(dim_t *)malloc(numsubzone*sizeof(dim_t)); for (cntzone=0; cntzone<numsubzone; cntzone++){ for (dim=0; dim<nd; dim++){ xmin[cntzone][dim]=1e99; xmax[cntzone][dim]=-1e99; } for_ijk(subzone[cntzone],is,js,ks,ie,je,ke){ if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)){ for (dim=0; dim<nd; dim++){ xmin[cntzone][dim]=min(xmin[cntzone][dim],_x(np[_ai(gl,i,j,k)],dim)); xmax[cntzone][dim]=max(xmax[cntzone][dim],_x(np[_ai(gl,i,j,k)],dim)); } } } } #ifdef DISTMPI MPI_Barrier(MPI_COMM_WORLD); wfprintf(stdout,"Fluid/%ld",numsubzone*numproc); #else wfprintf(stdout,"Fluid/%ld",numsubzone); #endif #if defined(OPENMPTHREADS) #pragma omp parallel for private(l_file,cntzone,dim,zone,i,j,k,cnt,thisweight) schedule(dynamic) #endif for (cntzone=0; cntzone<numsubzone; cntzone++){ for (l_file=0; l_file<numnodes; l_file++){ if (is_data_point_in_domain(x_file[l_file],xmin[cntzone],xmax[cntzone],radiusmax2_file[l_file])){ zone=subzone[cntzone]; if (find_interpolation_zone(np,gl,TYPELEVEL_FLUID,x_file[l_file],radiusmax2_file[l_file],&zone)){ for_jik(zone,is,js,ks,ie,je,ke){ if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)){ find_interpolation_weight(np,gl,_ai(gl,i,j,k),x_file[l_file],dx1_file[l_file], #ifdef _2DL dx2_file[l_file], #endif #ifdef _3DL dx3_file[l_file], #endif radiusmax2_file[l_file],&thisweight); #ifdef OPENMPTHREADS omp_set_lock(&(nodelock[_ai(gl,i,j,k)])); #endif if (thisweight>1e-99) { weight[_ai(gl,i,j,k)]+=thisweight; for (cnt=0; cnt<numinitvar; cnt++) initvar[_ai(gl,i,j,k)][cnt]+=thisweight*initvar_file[l_file][cnt]; } #ifdef 
OPENMPTHREADS omp_unset_lock(&(nodelock[_ai(gl,i,j,k)])); #endif } } } } } fprintf(stdout,"."); fflush(stdout); // if (mod(cntzone,numsubzone/100+1)==0) wfprintf(stdout,"."); } #ifdef OPENMPTHREADS #pragma omp parallel for private(i,j,k,cnt) schedule(static) #endif for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ if (weight[_ai(gl,i,j,k)]>1e-99 && is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)) { for (cnt=0; cnt<numinitvar; cnt++) initvar[_ai(gl,i,j,k)][cnt]=initvar[_ai(gl,i,j,k)][cnt]/weight[_ai(gl,i,j,k)]; init_node_fluid(np,_ai(gl,i,j,k), gl, defaultinitvartypefluid, initvar[_ai(gl,i,j,k)]); np[_ai(gl,i,j,k)].INIT_FLUID=TRUE; } } free(initvar); free(initvar_file); /* second do the emfield properties */ #ifdef EMFIELD initvar_emfield=(initvar_emfield_t *)malloc(sizeof(initvar_emfield_t)*(gl->domain_lim.ie+4) #ifdef _2DL *(gl->domain_lim.je+4) #endif #ifdef _3DL *(gl->domain_lim.ke+4) #endif ); for (cnt=0; cnt<16; cnt++){ if (fscanf(datafile,"%c",&(data_format_str[cnt]))!=1){ fatal_error("Problem with fscanf in emfield part of read_data_file_interpolation()."); } } data_format_str[16]=EOS; FORMAT001=FALSE; if (strcmp("WARPINTFORMAT001",data_format_str)==0) { FORMAT001=TRUE; } if (FORMAT001) { if (fscanf(datafile," numnodes_emfield=%ld nfe=%ld nd=%ld Lc=%lg effiter_U_emfield=%lg effiter_R_emfield=%lg%*[^\n]", &numnodes,&numflux_read,&numdim_read,&(gl->Lc),&(gl->effiter_U_emfield),&(gl->effiter_R_emfield))!=6){ fatal_error("Problem reading emfield preambule in interpolating file."); } fgetc(datafile); if (numdim_read!=nd) fatal_error("Number of dimensions read (%ld) does not equal current number of dimensions (%ld).",numdim_read,nd); if (numflux_read!=nfe) fatal_error("Number of fluxes read (%ld) does not equal current number of emfield fluxes (%ld).",numflux_read,nfe); gl->Lc=1.0e0; } else { fatal_error("Interpolation file format unknown for EMfield variables."); } /* read data and store in ram */ initvar_emfield_file=(initvar_emfield_t 
*)malloc(numnodes*sizeof(initvar_emfield_t)); x_file=(dim_t *)realloc(x_file,numnodes*sizeof(dim_t)); dx1_file=(dim_t *)realloc(dx1_file,numnodes*sizeof(dim_t)); #ifdef _2DL dx2_file=(dim_t *)realloc(dx2_file,numnodes*sizeof(dim_t)); #endif #ifdef _3DL dx3_file=(dim_t *)realloc(dx3_file,numnodes*sizeof(dim_t)); #endif radiusmax2_file=(double *)realloc(radiusmax2_file,numnodes*sizeof(double)); for (l_file=0; l_file<numnodes; l_file++){ cnterror=0; if (fread(initvar_emfield_file[l_file], sizeof(initvar_emfield_t), 1, datafile)!=1) cnterror++; if (fread(x_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++; if (fread(dx1_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++; #ifdef _2DL if (fread(dx2_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++; #endif #ifdef _3DL if (fread(dx3_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++; #endif if (cnterror>0) fatal_error("Could not read all data properly."); radiusmax2_file[l_file]=0.0e0; for (dim=0; dim<nd; dim++) radiusmax2_file[l_file]+=sqr(fabs(dx1_file[l_file][dim]) #ifdef _2DL +fabs(dx2_file[l_file][dim]) #endif #ifdef _3DL +fabs(dx3_file[l_file][dim]) #endif ); radiusmax2_file[l_file]*=1.1; } for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ weight[_ai(gl,i,j,k)]=0.0e0; for (cnt=0; cnt<numinitvar_emfield; cnt++) (initvar_emfield[_ai(gl,i,j,k)])[cnt]=0.0; } for (cntzone=0; cntzone<numsubzone; cntzone++){ for (dim=0; dim<nd; dim++){ xmin[cntzone][dim]=1e99; xmax[cntzone][dim]=-1e99; } for_ijk(subzone[cntzone],is,js,ks,ie,je,ke){ if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)){ for (dim=0; dim<nd; dim++){ xmin[cntzone][dim]=min(xmin[cntzone][dim],_x(np[_ai(gl,i,j,k)],dim)); xmax[cntzone][dim]=max(xmax[cntzone][dim],_x(np[_ai(gl,i,j,k)],dim)); } } } } #ifdef DISTMPI MPI_Barrier(MPI_COMM_WORLD); wfprintf(stdout,"EMfield/%ld",numsubzone*numproc); #else wfprintf(stdout,"EMfield/%ld",numsubzone); #endif #if defined(OPENMPTHREADS) //&& !defined(DISTMPI) #pragma omp parallel for 
private(l_file,cntzone,cnt,thisweight,dim,zone,i,j,k) schedule(dynamic) #endif for (cntzone=0; cntzone<numsubzone; cntzone++){ for (l_file=0; l_file<numnodes; l_file++){ if (is_data_point_in_domain(x_file[l_file],xmin[cntzone],xmax[cntzone],radiusmax2_file[l_file])){ zone=subzone[cntzone]; if (find_interpolation_zone(np,gl,TYPELEVEL_EMFIELD,x_file[l_file],radiusmax2_file[l_file],&zone)){ for_jik(zone,is,js,ks,ie,je,ke){ if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)){ find_interpolation_weight(np,gl,_ai(gl,i,j,k),x_file[l_file],dx1_file[l_file], #ifdef _2DL dx2_file[l_file], #endif #ifdef _3DL dx3_file[l_file], #endif radiusmax2_file[l_file],&thisweight); #ifdef OPENMPTHREADS omp_set_lock(&(nodelock[_ai(gl,i,j,k)])); #endif if (thisweight>1e-99) { weight[_ai(gl,i,j,k)]+=thisweight; for (cnt=0; cnt<numinitvar_emfield; cnt++) initvar_emfield[_ai(gl,i,j,k)][cnt]+=thisweight*initvar_emfield_file[l_file][cnt]; } #ifdef OPENMPTHREADS omp_unset_lock(&(nodelock[_ai(gl,i,j,k)])); #endif } } } } } // if (mod(cntzone,numsubzone/100+1)==0) wfprintf(stdout,"."); fprintf(stdout,"."); fflush(stdout); } #ifdef OPENMPTHREADS #pragma omp parallel for private(i,j,k,cnt) schedule(static) #endif for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ if (weight[_ai(gl,i,j,k)]>1e-99 && is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) { for (cnt=0; cnt<numinitvar_emfield; cnt++) initvar_emfield[_ai(gl,i,j,k)][cnt]=initvar_emfield[_ai(gl,i,j,k)][cnt]/weight[_ai(gl,i,j,k)]; init_node_emfield(np[_ai(gl,i,j,k)], gl, defaultinitvartypeemfield, initvar_emfield[_ai(gl,i,j,k)]); np[_ai(gl,i,j,k)].INIT_EMFIELD=TRUE; } } free(initvar_emfield); free(initvar_emfield_file); #endif //EMFIELD free(subzone); free(xmin); free(xmax); fclose(datafile); free(weight); #ifdef OPENMPTHREADS for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ omp_destroy_lock(&(nodelock[_ai(gl,i,j,k)])); } free(nodelock); #endif #ifdef DISTMPI MPI_Barrier(MPI_COMM_WORLD); if (rank!=0) { gl->effiter_U=0.0; gl->effiter_R=0.0; #ifdef EMFIELD 
/* tail of read_data_file_interpolation(): under DISTMPI only rank 0 keeps the
   globally-summed effective-iteration counters; the other ranks zero theirs */
    gl->effiter_U_emfield=0.0;
    gl->effiter_R_emfield=0.0;
#endif
  }
#endif
  wfprintf(stdout,"done;\n");
  /* release the interpolation-point arrays that were read from the file */
  free(x_file);
  free(dx1_file);
#ifdef _2DL
  free(dx2_file);
#endif
#ifdef _3DL
  free(dx3_file);
#endif
  free(radiusmax2_file);
}


/* Write all valid fluid (and, when compiled with EMFIELD, emfield) node states
   to the CFDWARP interpolation data file <filename> in WARPINTFORMAT001
   layout: an ASCII preamble holding the valid-node count and solver counters,
   followed by one binary record per valid node consisting of the default
   initvar vector, the node position x, and the local grid spacings dx1
   (plus dx2/dx3 in 2D/3D builds).
   Under DISTMPI every rank walks the same global loop; the rank that owns a
   node ships its data to rank 0, which is the only rank that actually writes
   (wfopen/wfwrite/wfprintf are presumably rank-0-only wrappers -- TODO confirm).
   NOTE(review): the Ssend/Recv pairing relies on all ranks executing identical
   loop trip sequences; do not reorder statements in the i,j,k loop. */
void write_data_file_interpolation(char *filename, np_t *np, gl_t *gl){
  FILE *datafile;
  long i,j,k,cnt;
  dim_t dx1,x;
#ifdef _2DL
  dim_t dx2;
#endif
#ifdef _3DL
  dim_t dx3;
#endif
  double tmp_time, tmp_dt;
  long numnodes,dim;
  int TYPELEVEL,pass,passmax;
  bool *NODEVALID;
  initvar_t initvar;
  double effiter_U,effiter_R;
#ifdef EMFIELD
  double effiter_U_emfield,effiter_R_emfield;
  initvar_emfield_t initvar_emfield;
#endif
#ifdef DISTMPI
  int rank;
  MPI_Status MPI_Status1;
#endif
  /* nodes may be suspended. Hence, ensure that appropriate nodes are resumed. */
  resume_nodes_in_zone(np,gl,gl->domain);
  /* one validity flag per node of the global (all-ranks) domain */
  NODEVALID=(bool *)malloc(sizeof(bool)*(gl->domain_lim_all.ie-gl->domain_lim_all.is+1)
#ifdef _2DL
                   *(gl->domain_lim_all.je-gl->domain_lim_all.js+1)
#endif
#ifdef _3DL
                   *(gl->domain_lim_all.ke-gl->domain_lim_all.ks+1)
#endif
                   );
  effiter_U=gl->effiter_U;
  effiter_R=gl->effiter_R;
#ifdef EMFIELD
  effiter_U_emfield=gl->effiter_U_emfield;
  effiter_R_emfield=gl->effiter_R_emfield;
#endif
#ifdef DISTMPI
  /* the iteration counters are distributed over the ranks: sum them so the
     preamble carries the global totals */
  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Allreduce(&gl->effiter_U, &effiter_U, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(&gl->effiter_R, &effiter_R, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
#ifdef EMFIELD
  MPI_Allreduce(&gl->effiter_U_emfield, &effiter_U_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(&gl->effiter_R_emfield, &effiter_R_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
#endif
#endif
  datafile = wfopen(filename, "w");
  wfprintf(stdout,"Writing to CFDWARP interpolation data file %s..",filename);
  /* pass 1 writes the fluid data; pass 2 (EMFIELD builds only) the emfield data */
#ifdef EMFIELD
  passmax=2;
#else
  passmax=1;
#endif
  for (pass=1; pass<=passmax; pass++){
    if (pass==1){
      TYPELEVEL=TYPELEVEL_FLUID;
    } else {
#ifdef EMFIELD
      TYPELEVEL=TYPELEVEL_EMFIELD;
#endif
    }
#ifdef DISTMPI
    MPI_Barrier(MPI_COMM_WORLD);
#endif
    find_NODEVALID_on_domain_all(np, gl, TYPELEVEL, NODEVALID);
    /* count the valid nodes first: the count goes into the ASCII preamble */
    numnodes=0;
    for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
          if (NODEVALID[_ai_all(gl,i,j,k)]) {
            numnodes++;
          }
    }
    if (pass==1){
#ifdef UNSTEADY
      tmp_time=gl->time;
      tmp_dt=gl->dt;
#else
      /* steady-state build: no physical time; dt_steady is a huge sentinel */
      tmp_time=0.0;
      tmp_dt=dt_steady;
#endif
      wfprintf(datafile,"WARPINTFORMAT001 numnodes=%ld nf=%ld nd=%ld ns=%ld windowis=%ld windowie=%ld iter=%ld effiter_U=%E effiter_R=%E CFL=%E time=%E dt=%E\n",numnodes,nf,nd,
             ns,gl->window.is,gl->window.ie,gl->iter,effiter_U,effiter_R,gl->CFL,tmp_time,tmp_dt);
    } else {
#ifdef EMFIELD
      wfprintf(datafile,"WARPINTFORMAT001 numnodes_emfield=%ld nfe=%ld nd=%ld Lc=%E effiter_U_emfield=%E effiter_R_emfield=%E\n",numnodes,nfe,nd,gl->Lc,effiter_U_emfield,effiter_R_emfield);
#endif
    }
    for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
#ifdef DISTMPI
          /* the rank owning node (i,j,k) builds the initvar vector and ships
             it to rank 0; invalid nodes send a zero vector placeholder */
          if (pass==1){
            if (_node_rank(gl,i,j,k)==rank) {
              if (NODEVALID[_ai_all(gl,i,j,k)]) {
                find_default_initvar(np, gl, _ai(gl,i,j,k), initvar);
              } else {
                for (cnt=0; cnt<numinitvar; cnt++) initvar[cnt]=0.0;
              }
              if (rank!=0) {
                MPI_Ssend(initvar,numinitvar,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
              }
            }
            if (rank==0 && _node_rank(gl,i,j,k)!=0){
              MPI_Recv(initvar,numinitvar,MPI_DOUBLE,_node_rank(gl,i,j,k),0,MPI_COMM_WORLD,&MPI_Status1);
            }
          } else {
#ifdef EMFIELD
            if (_node_rank(gl,i,j,k)==rank) {
              if (NODEVALID[_ai_all(gl,i,j,k)]) {
                find_default_initvar_emfield(np, gl, _ai(gl,i,j,k),initvar_emfield);
              } else {
                for (cnt=0; cnt<numinitvar_emfield; cnt++) initvar_emfield[cnt]=0.0;
              }
              if (rank!=0) {
                MPI_Ssend(initvar_emfield,numinitvar_emfield,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
              }
            }
            if (rank==0 && _node_rank(gl,i,j,k)!=0){
              MPI_Recv(initvar_emfield,numinitvar_emfield,MPI_DOUBLE,_node_rank(gl,i,j,k),0,MPI_COMM_WORLD,&MPI_Status1);
            }
#endif
          }
#else
          /* single-process build: fill initvar directly */
          if (pass==1){
            if (NODEVALID[_ai_all(gl,i,j,k)]) {
              find_default_initvar(np, gl, _ai(gl,i,j,k), initvar);
            } else {
              for (cnt=0; cnt<numinitvar; cnt++) initvar[cnt]=0.0;
            }
          } else {
#ifdef EMFIELD
            if (NODEVALID[_ai_all(gl,i,j,k)]) {
              find_default_initvar_emfield(np, gl, _ai(gl,i,j,k), initvar_emfield);
            } else {
              for (cnt=0; cnt<numinitvar_emfield; cnt++) initvar_emfield[cnt]=0.0;
            }
#endif
          }
#endif
          if (NODEVALID[_ai_all(gl,i,j,k)]) {
#ifdef DISTMPI
            if (_node_rank(gl,i,j,k)==rank) {
#endif
              /* node position */
              for (dim=0; dim<nd; dim++) x[dim]=_x(np[_ai(gl,i,j,k)],dim);
              /* local grid spacing along each index direction: centered
                 difference when both neighbours are valid, one-sided when only
                 one is, fatal when neither is (interpolation needs a stencil) */
              for (dim=0; dim<nd; dim++){
                if ((i<gl->domain_all.ie && NODEVALID[_ai_all(gl,i+1,j,k)])
                 && (i>gl->domain_all.is && NODEVALID[_ai_all(gl,i-1,j,k)])) {
                  dx1[dim]=0.5*(np[_ai(gl,i+1,j,k)].bs->x[dim]-np[_ai(gl,i-1,j,k)].bs->x[dim]);
                } else {
                  if (i<gl->domain_all.ie && NODEVALID[_ai_all(gl,i+1,j,k)]) {
                    dx1[dim]=(np[_ai(gl,i+1,j,k)].bs->x[dim]-np[_ai(gl,i,j,k)].bs->x[dim]);
                  } else {
                    if (i>gl->domain_all.is && NODEVALID[_ai_all(gl,i-1,j,k)]) {
                      dx1[dim]=(np[_ai(gl,i,j,k)].bs->x[dim]-np[_ai(gl,i-1,j,k)].bs->x[dim]);
                    } else {
                      fatal_error("Couldn't find adjacent valid node along i needed for interpolation.");
                    }
                  }
                }
#ifdef _2DL
                if ((j<gl->domain_all.je && NODEVALID[_ai_all(gl,i,j+1,k)])
                 && (j>gl->domain_all.js && NODEVALID[_ai_all(gl,i,j-1,k)])) {
                  dx2[dim]=0.5*(np[_ai(gl,i,j+1,k)].bs->x[dim]-np[_ai(gl,i,j-1,k)].bs->x[dim]);
                } else {
                  if (j<gl->domain_all.je && NODEVALID[_ai_all(gl,i,j+1,k)]) {
                    dx2[dim]=(np[_ai(gl,i,j+1,k)].bs->x[dim]-np[_ai(gl,i,j,k)].bs->x[dim]);
                  } else {
                    if (j>gl->domain_all.js && NODEVALID[_ai_all(gl,i,j-1,k)]) {
                      dx2[dim]=(np[_ai(gl,i,j,k)].bs->x[dim]-np[_ai(gl,i,j-1,k)].bs->x[dim]);
                    } else {
                      fatal_error("Couldn't find adjacent valid node along j needed for interpolation.");
                    }
                  }
                }
#endif
#ifdef _3DL
                if ((k<gl->domain_all.ke && NODEVALID[_ai_all(gl,i,j,k+1)])
                 && (k>gl->domain_all.ks && NODEVALID[_ai_all(gl,i,j,k-1)])) {
                  dx3[dim]=0.5*(np[_ai(gl,i,j,k+1)].bs->x[dim]-np[_ai(gl,i,j,k-1)].bs->x[dim]);
                } else {
                  if (k<gl->domain_all.ke && NODEVALID[_ai_all(gl,i,j,k+1)]) {
                    dx3[dim]=(np[_ai(gl,i,j,k+1)].bs->x[dim]-np[_ai(gl,i,j,k)].bs->x[dim]);
                  } else {
                    if (k>gl->domain_all.ks && NODEVALID[_ai_all(gl,i,j,k-1)]) {
                      dx3[dim]=(np[_ai(gl,i,j,k)].bs->x[dim]-np[_ai(gl,i,j,k-1)].bs->x[dim]);
                    } else {
                      fatal_error("Couldn't find adjacent valid node along k needed for interpolation.");
                    }
                  }
                }
#endif
              }
#ifdef DISTMPI
            }
            /* funnel position and spacings from the owning rank to rank 0 */
            if (rank!=0 && _node_rank(gl,i,j,k)==rank) MPI_Ssend(x,nd,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
            if (rank==0 && _node_rank(gl,i,j,k)!=0) MPI_Recv(x,nd,MPI_DOUBLE,_node_rank(gl,i,j,k),0,MPI_COMM_WORLD,&MPI_Status1);
            if (rank!=0 && _node_rank(gl,i,j,k)==rank) MPI_Ssend(dx1,nd,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
            if (rank==0 && _node_rank(gl,i,j,k)!=0) MPI_Recv(dx1,nd,MPI_DOUBLE,_node_rank(gl,i,j,k),0,MPI_COMM_WORLD,&MPI_Status1);
#ifdef _2DL
            if (rank!=0 && _node_rank(gl,i,j,k)==rank) MPI_Ssend(dx2,nd,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
            if (rank==0 && _node_rank(gl,i,j,k)!=0) MPI_Recv(dx2,nd,MPI_DOUBLE,_node_rank(gl,i,j,k),0,MPI_COMM_WORLD,&MPI_Status1);
#endif
#ifdef _3DL
            if (rank!=0 && _node_rank(gl,i,j,k)==rank) MPI_Ssend(dx3,nd,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
            if (rank==0 && _node_rank(gl,i,j,k)!=0) MPI_Recv(dx3,nd,MPI_DOUBLE,_node_rank(gl,i,j,k),0,MPI_COMM_WORLD,&MPI_Status1);
#endif
#endif
            /* binary record for this node: initvar, then x, dx1 (and dx2/dx3) */
            if (pass==1) {
              wfwrite(initvar, sizeof(initvar_t), 1, datafile);
            } else {
#ifdef EMFIELD
              wfwrite(initvar_emfield, sizeof(initvar_emfield_t), 1, datafile);
#endif
            }
            wfwrite(x, sizeof(dim_t), 1, datafile);
            wfwrite(dx1, sizeof(dim_t), 1, datafile);
#ifdef _2DL
            wfwrite(dx2, sizeof(dim_t), 1, datafile);
#endif
#ifdef _3DL
            wfwrite(dx3, sizeof(dim_t), 1, datafile);
#endif
          } //end if nodevalid
    } // for_ijk
  }//pass
#ifdef DISTMPI
  MPI_Barrier(MPI_COMM_WORLD);
#endif
  wfclose(datafile);
  wfprintf(stdout,"done.\n");
  free(NODEVALID);
}
relu6_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>

/* ReLU6 reference kernel for uint8 tensors: dequantize, clamp to [0,6],
 * requantize.  Assumes a 4-D NCHW layout (dims[0]=N, dims[1]=C, dims[2]=H,
 * dims[3]=W), as the fp32 path and the rest of the reference ops do.
 * The reshape() hook below copies the input shape to the output, so input
 * and output hold the same element count.
 * Returns 0 on success, -1 on allocation failure. */
int ref_relu6_uint8(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    int w = input_tensor->dims[3];
    int h = input_tensor->dims[2];
    int channels = input_tensor->dims[1];
    int batch = input_tensor->dims[0];
    int c_step = h * w;
    int batch_step = c_step * channels;
    int total_size = batch_step * batch;

    uint8_t* input_uint8 = (uint8_t*)input_tensor->data;
    uint8_t* output_uint8 = (uint8_t*)output_tensor->data;
    float input_scale = input_tensor->scale;
    float output_scale = output_tensor->scale;
    int32_t input_zero = input_tensor->zero_point;
    int32_t output_zero = output_tensor->zero_point;

    /* dequantize into an fp32 scratch buffer */
    float* data_fp32 = (float*)sys_malloc(total_size * sizeof(float));
    if (data_fp32 == NULL)
        return -1;
    for (int i = 0; i < total_size; i++)
        data_fp32[i] = ((float)input_uint8[i] - (float)input_zero) * input_scale;

    /* clamp in place to [0, 6]; the buffer is contiguous so a flat loop over
       all batches/channels is equivalent to the per-channel walk */
    for (int i = 0; i < total_size; i++)
    {
        if (data_fp32[i] > 6.f)
            data_fp32[i] = 6.f;
        else if (data_fp32[i] < 0.f)
            data_fp32[i] = 0.f;
    }

    /* requantize, saturating to the uint8 range */
    for (int i = 0; i < total_size; i++)
    {
        int udata = round(data_fp32[i] / output_scale + output_zero);
        if (udata > 255)
            udata = 255;
        else if (udata < 0)
            udata = 0;
        output_uint8[i] = udata;
    }

    sys_free(data_fp32);
    return 0;
}

/* ReLU6 reference kernel for fp32 tensors: out = min(max(in, 0), 6).
 * Fix over the previous version: the batch dimension (dims[0]) was ignored,
 * so for batch > 1 only the first batch was processed and the remaining
 * output elements were left untouched.  The loop now covers every batch,
 * mirroring the uint8 path.  Returns 0. */
int ref_relu6_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    int w = input_tensor->dims[3];
    int h = input_tensor->dims[2];
    int channels = input_tensor->dims[1];
    int batch = input_tensor->dims[0];
    int c_step = h * w;
    int batch_step = c_step * channels;

    float* input_data = (float*)input_tensor->data;
    float* out_data = (float*)output_tensor->data;

    for (int n = 0; n < batch; n++)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < channels; q++)
        {
            float* src = input_data + batch_step * n + c_step * q;
            float* dst = out_data + batch_step * n + c_step * q;
            for (int i = 0; i < c_step; i++)
            {
                float v = src[i];
                if (v > 6.f)
                    v = 6.f;
                else if (v < 0.f)
                    v = 0.f;
                dst[i] = v;
            }
        }
    }
    return 0;
}

/* The reference implementation keeps no per-node state. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Dispatch on the input tensor's data type; returns -1 for unsupported types. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int ret = -1;
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_relu6_fp32(input_tensor, output_tensor, exec_graph->num_thread);
    else if (input_tensor->data_type == TENGINE_DT_UINT8)
        ret = ref_relu6_uint8(input_tensor, output_tensor, exec_graph->num_thread);
    return ret;
}

/* ReLU6 is shape-preserving: propagate the input shape to the output. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* node = exec_node->ir_node;
    struct graph* ir_graph = node->graph;
    struct tensor* input = get_ir_graph_tensor(ir_graph, node->input_tensors[0]);
    struct tensor* output = get_ir_graph_tensor(ir_graph, node->output_tensors[0]);

    int ret = set_ir_tensor_shape(output, input->dims, input->dim_num);
    return ret;
}

static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_CANDO;
}

static struct node_ops hcl_node_ops = {.prerun = NULL,
                                       .run = run,
                                       .reshape = reshape,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

int register_relu6_ref_op()
{
    return register_builtin_node_ops(OP_RELU6, &hcl_node_ops);
}

int unregister_relu6_ref_op()
{
    return unregister_builtin_node_ops(OP_RELU6, &hcl_node_ops);
}
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* Static declarations. 
*/ #if MAGICKCORE_ZERO_CONFIGURATION_SUPPORT #include "MagickCore/threshold-map.h" #else static const char *const BuiltinMap= "<?xml version=\"1.0\"?>" "<thresholds>" " <threshold map=\"threshold\" alias=\"1x1\">" " <description>Threshold 1x1 (non-dither)</description>" " <levels width=\"1\" height=\"1\" divisor=\"2\">" " 1" " </levels>" " </threshold>" " <threshold map=\"checks\" alias=\"2x1\">" " <description>Checkerboard 2x1 (dither)</description>" " <levels width=\"2\" height=\"2\" divisor=\"3\">" " 1 2" " 2 1" " </levels>" " </threshold>" "</thresholds>"; #endif /* Forward declarations. */ static ThresholdMap *GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveThresholdImage() selects an individual threshold for each pixel % based on the range of intensity values in its local neighborhood. This % allows for thresholding of an image whose global intensity histogram % doesn't contain distinctive peaks. % % The format of the AdaptiveThresholdImage method is: % % Image *AdaptiveThresholdImage(const Image *image,const size_t width, % const size_t height,const double bias,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the local neighborhood. % % o height: the height of the local neighborhood. % % o bias: the mean bias. % % o exception: return any errors or warnings in this structure. 
%
*/
/* Local adaptive (mean-C) threshold: each pixel is compared against the mean
   of its width x height neighbourhood plus <bias>.  The per-row loop keeps a
   running window sum: channel_sum holds the full window total, channel_bias
   the leading column's contribution so the window can be slid right by one
   pixel per iteration.  The pointer arithmetic is exact-order-critical. */
MagickExport Image *AdaptiveThresholdImage(const Image *image,
  const size_t width,const size_t height,const double bias,
  ExceptionInfo *exception)
{
#define AdaptiveThresholdImageTag  "AdaptiveThreshold/Image"

  CacheView
    *image_view,
    *threshold_view;

  Image
    *threshold_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickSizeType
    number_pixels;

  ssize_t
    y;

  /*
    Initialize threshold image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  threshold_image=CloneImage(image,0,0,MagickTrue,exception);
  if (threshold_image == (Image *) NULL)
    return((Image *) NULL);
  /* degenerate neighbourhood: return the unmodified clone */
  if ((width == 0) || (height == 0))
    return(threshold_image);
  status=SetImageStorageClass(threshold_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      threshold_image=DestroyImage(threshold_image);
      return((Image *) NULL);
    }
  /*
    Threshold image.
  */
  status=MagickTrue;
  progress=0;
  number_pixels=(MagickSizeType) width*height;
  image_view=AcquireVirtualCacheView(image,exception);
  threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,threshold_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_bias[MaxPixelChannels],
      channel_sum[MaxPixelChannels];

    register const Quantum
      *magick_restrict p,
      *magick_restrict pixels;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      center,
      u,
      v;

    if (status == MagickFalse)
      continue;
    /* virtual pixels: the view is widened by the window so edge rows/columns
       read mirrored/virtual neighbours instead of going out of bounds */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (height/2L),image->columns+width,height,exception);
    q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* offset of the window's center pixel inside the widened row buffer */
    center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+
      GetPixelChannels(image)*(width/2);
    /* prime channel_sum/channel_bias with the leftmost window of this row */
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel channel = GetPixelChannelChannel(image,i);
      PixelTrait traits = GetPixelChannelTraits(image,channel);
      PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
        channel);
      if ((traits == UndefinedPixelTrait) ||
          (threshold_traits == UndefinedPixelTrait))
        continue;
      if ((threshold_traits & CopyPixelTrait) != 0)
        {
          SetPixelChannel(threshold_image,channel,p[center+i],q);
          continue;
        }
      pixels=p;
      channel_bias[channel]=0.0;
      channel_sum[channel]=0.0;
      for (v=0; v < (ssize_t) height; v++)
      {
        for (u=0; u < (ssize_t) width; u++)
        {
          /* remember the last column's contribution: it is subtracted when
             the window slides right */
          if (u == (ssize_t) (width-1))
            channel_bias[channel]+=pixels[i];
          channel_sum[channel]+=pixels[i];
          pixels+=GetPixelChannels(image);
        }
        pixels+=GetPixelChannels(image)*image->columns;
      }
    }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          mean;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (threshold_traits == UndefinedPixelTrait))
          continue;
        if ((threshold_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(threshold_image,channel,p[center+i],q);
            continue;
          }
        /* slide the window one pixel right: drop the old leading column,
           then accumulate the new leading and trailing columns */
        channel_sum[channel]-=channel_bias[channel];
        channel_bias[channel]=0.0;
        pixels=p;
        for (v=0; v < (ssize_t) height; v++)
        {
          channel_bias[channel]+=pixels[i];
          pixels+=(width-1)*GetPixelChannels(image);
          channel_sum[channel]+=pixels[i];
          pixels+=GetPixelChannels(image)*(image->columns+1);
        }
        mean=(double) (channel_sum[channel]/number_pixels+bias);
        SetPixelChannel(threshold_image,channel,(Quantum) ((double)
          p[center+i] <= mean ? 0 : QuantumRange),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(threshold_image);
    }
    if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_image->type=image->type;
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);
  return(threshold_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o T h r e s h o l d I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoThresholdImage() automatically performs image thresholding
%  dependent on which method you specify.
% % The format of the AutoThresholdImage method is: % % MagickBooleanType AutoThresholdImage(Image *image, % const AutoThresholdMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-threshold. % % o method: choose from Kapur, OTSU, or Triangle. % % o exception: return any errors or warnings in this structure. % */ static double KapurThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { #define MaxIntensity 255 double *black_entropy, *cumulative_histogram, entropy, epsilon, maximum_entropy, *white_entropy; register ssize_t i, j; size_t threshold; /* Compute optimal threshold from the entopy of the histogram. */ cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*cumulative_histogram)); black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*black_entropy)); white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*white_entropy)); if ((cumulative_histogram == (double *) NULL) || (black_entropy == (double *) NULL) || (white_entropy == (double *) NULL)) { if (white_entropy != (double *) NULL) white_entropy=(double *) RelinquishMagickMemory(white_entropy); if (black_entropy != (double *) NULL) black_entropy=(double *) RelinquishMagickMemory(black_entropy); if (cumulative_histogram != (double *) NULL) cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Entropy for black and white parts of the histogram. */ cumulative_histogram[0]=histogram[0]; for (i=1; i <= MaxIntensity; i++) cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i]; epsilon=MagickMinimumValue; for (j=0; j <= MaxIntensity; j++) { /* Black entropy. 
*/ black_entropy[j]=0.0; if (cumulative_histogram[j] > epsilon) { entropy=0.0; for (i=0; i <= j; i++) if (histogram[i] > epsilon) entropy-=histogram[i]/cumulative_histogram[j]* log(histogram[i]/cumulative_histogram[j]); black_entropy[j]=entropy; } /* White entropy. */ white_entropy[j]=0.0; if ((1.0-cumulative_histogram[j]) > epsilon) { entropy=0.0; for (i=j+1; i <= MaxIntensity; i++) if (histogram[i] > epsilon) entropy-=histogram[i]/(1.0-cumulative_histogram[j])* log(histogram[i]/(1.0-cumulative_histogram[j])); white_entropy[j]=entropy; } } /* Find histogram bin with maximum entropy. */ maximum_entropy=black_entropy[0]+white_entropy[0]; threshold=0; for (j=1; j <= MaxIntensity; j++) if ((black_entropy[j]+white_entropy[j]) > maximum_entropy) { maximum_entropy=black_entropy[j]+white_entropy[j]; threshold=(size_t) j; } /* Free resources. */ white_entropy=(double *) RelinquishMagickMemory(white_entropy); black_entropy=(double *) RelinquishMagickMemory(black_entropy); cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); return(100.0*threshold/MaxIntensity); } static double OTSUThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { double max_sigma, *myu, *omega, *probability, *sigma, threshold; register ssize_t i; /* Compute optimal threshold from maximization of inter-class variance. 
*/ myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu)); omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega)); probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*probability)); sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma)); if ((myu == (double *) NULL) || (omega == (double *) NULL) || (probability == (double *) NULL) || (sigma == (double *) NULL)) { if (sigma != (double *) NULL) sigma=(double *) RelinquishMagickMemory(sigma); if (probability != (double *) NULL) probability=(double *) RelinquishMagickMemory(probability); if (omega != (double *) NULL) omega=(double *) RelinquishMagickMemory(omega); if (myu != (double *) NULL) myu=(double *) RelinquishMagickMemory(myu); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Calculate probability density. */ for (i=0; i <= (ssize_t) MaxIntensity; i++) probability[i]=histogram[i]; /* Generate probability of graylevels and mean value for separation. */ omega[0]=probability[0]; myu[0]=0.0; for (i=1; i <= (ssize_t) MaxIntensity; i++) { omega[i]=omega[i-1]+probability[i]; myu[i]=myu[i-1]+i*probability[i]; } /* Sigma maximization: inter-class variance and compute optimal threshold. */ threshold=0; max_sigma=0.0; for (i=0; i < (ssize_t) MaxIntensity; i++) { sigma[i]=0.0; if ((omega[i] != 0.0) && (omega[i] != 1.0)) sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0- omega[i])); if (sigma[i] > max_sigma) { max_sigma=sigma[i]; threshold=(double) i; } } /* Free resources. 
*/
  myu=(double *) RelinquishMagickMemory(myu);
  omega=(double *) RelinquishMagickMemory(omega);
  probability=(double *) RelinquishMagickMemory(probability);
  sigma=(double *) RelinquishMagickMemory(sigma);
  /* Convert the optimal bin index into a percentage of the intensity range. */
  return(100.0*threshold/MaxIntensity);
}

/*
  TriangleThreshold() computes a global threshold with the triangle
  algorithm: draw a line from the histogram peak to the far end of the
  histogram and pick the bin with the greatest perpendicular distance from
  that line.  Returns the threshold as a percentage (0..100) of the
  intensity range.  Assumes histogram[] has MaxIntensity+1 bins.
*/
static double TriangleThreshold(const double *histogram)
{
  double
    a,
    b,
    c,
    count,
    distance,
    inverse_ratio,
    max_distance,
    segment,
    x1,
    x2,
    y1,
    y2;

  register ssize_t
    i;

  ssize_t
    end,
    max,
    start,
    threshold;

  /*
    Compute optimal threshold with triangle algorithm.
  */
  start=0;  /* find start bin, first bin not zero count */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > 0.0)
      {
        start=i;
        break;
      }
  end=0;  /* find end bin, last bin not zero count */
  for (i=(ssize_t) MaxIntensity; i >= 0; i--)
    if (histogram[i] > 0.0)
      {
        end=i;
        break;
      }
  max=0;  /* find max bin, bin with largest count */
  count=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > count)
      {
        max=i;
        count=histogram[i];
      }
  /*
    Compute threshold at split point.
  */
  x1=(double) max;
  y1=histogram[max];
  x2=(double) end;
  if ((max-start) >= (end-max))
    x2=(double) start;  /* the longer tail fixes the line's far endpoint */
  y2=0.0;
  /* Line through (x1,y1)-(x2,y2) in implicit form a*x+b*y+c=0. */
  a=y1-y2;
  b=x2-x1;
  c=(-1.0)*(a*x1+b*y1);
  inverse_ratio=1.0/sqrt(a*a+b*b+c*c);
  threshold=0;
  max_distance=0.0;
  if (x2 == (double) start)
    /* scan the left tail; only points below the line (segment > 0) count */
    for (i=start; i < max; i++)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment > 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  else
    /* scan the right tail; only points below the line (segment < 0) count */
    for (i=end; i > max; i--)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment < 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  return(100.0*threshold/MaxIntensity);
}

/*
  AutoThresholdImage() automatically selects a threshold (Kapur, OTSU, or
  triangle method) from the image histogram and applies it with
  BilevelImage().
*/
MagickExport MagickBooleanType AutoThresholdImage(Image *image,
  const AutoThresholdMethod method,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  char
    property[MagickPathExtent];

  double
    gamma,
    *histogram,
    sum,
    threshold;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Form histogram.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Histogram is binned on the 8-bit-scaled pixel intensity. */
      double intensity = GetPixelIntensity(image,p);
      histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Normalize histogram.
  */
  sum=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    sum+=histogram[i];
  gamma=PerceptibleReciprocal(sum);  /* guards division by zero on blank images */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    histogram[i]=gamma*histogram[i];
  /*
    Discover threshold from histogram.
  */
  switch (method)
  {
    case KapurThresholdMethod:
    {
      threshold=KapurThreshold(image,histogram,exception);
      break;
    }
    case OTSUThresholdMethod:
    default:
    {
      threshold=OTSUThreshold(image,histogram,exception);
      break;
    }
    case TriangleThresholdMethod:
    {
      threshold=TriangleThreshold(histogram);
      break;
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  if (threshold < 0.0)
    status=MagickFalse;  /* a negative threshold signals an allocation failure */
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Threshold image.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
  (void) SetImageProperty(image,"auto-threshold:threshold",property,exception);
  if (IsStringTrue(GetImageArtifact(image,"auto-threshold:verbose")) !=
      MagickFalse)
    (void) FormatLocaleFile(stdout,"%.*g%%\n",GetMagickPrecision(),threshold);
  return(BilevelImage(image,QuantumRange*threshold/100.0,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B i l e v e l I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BilevelImage() changes the value of individual pixels based on the
%  intensity of each pixel channel.  The result is a high-contrast image.
%
%  More precisely each channel value of the image is 'thresholded' so that if
%  it is equal to or less than the given value it is set to zero, while any
%  value greater than that given is set to its maximum or QuantumRange.
%
%  This function is what is used to implement the "-threshold" operator for
%  the command line API.
%
%  If the default channel setting is given the image is thresholded using just
%  the gray 'intensity' of the image, rather than the individual channels.
%
%  The format of the BilevelImage method is:
%
%      MagickBooleanType BilevelImage(Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: define the threshold values.
%
%    o exception: return any errors or warnings in this structure.
%
%  Aside: You can get the same results as operator using LevelImages()
%  with the 'threshold' value for both the black_point and the white_point.
% */ MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold, ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); /* Bilevel threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; q[i]=(Quantum) (pixel <= threshold ? 
0 : QuantumRange); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l a c k T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlackThresholdImage() is like ThresholdImage() but forces all pixels below % the threshold into black while leaving all pixels at or above the threshold % unchanged. % % The format of the BlackThresholdImage method is: % % MagickBooleanType BlackThresholdImage(Image *image, % const char *threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: define the threshold value. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);  /* no threshold given: nothing to do */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Parse the threshold geometry: rho[,sigma[,xi[,psi[,chi]]]] map to the
    red, green, blue, and alpha (and black for CMYK) channel thresholds.
  */
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      /* For CMYK, psi maps to the black channel and chi to alpha. */
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      /* Percent form: scale thresholds from 0..100 to the quantum range. */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    Black threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        /* Pixels strictly below the channel threshold become black. */
        if (pixel < GetPixelInfoChannel(&threshold,channel))
          q[i]=(Quantum) 0;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l a m p I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampImage() set each pixel whose value is below zero to zero and any
%  pixel whose value is above the quantum range to the quantum range (e.g.
%  65535) otherwise the pixel value remains unchanged.
%
%  The format of the ClampImage method is:
%
%      MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag  "Clamp/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      /* PseudoClass: clamp the colormap entries and sync pixels from it. */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) ClampPixel(q->red);
        q->green=(double) ClampPixel(q->green);
        q->blue=(double) ClampPixel(q->blue);
        q->alpha=(double) ClampPixel(q->alpha);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Clamp image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampPixel((MagickRealType) q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ClampImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorThresholdImage() forces all pixels in the color range to white
%  otherwise black.
%
%  The format of the ColorThresholdImage method is:
%
%      MagickBooleanType ColorThresholdImage(Image *image,
%        const PixelInfo *start_color,const PixelInfo *stop_color,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o start_color, stop_color: define the start and stop color range.
Any
%      pixel within the range returns white otherwise black.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ColorThresholdImage(Image *image,
  const PixelInfo *start_color,const PixelInfo *stop_color,
  ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    start,
    stop;

  ssize_t
    y;

  /*
    Color threshold image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Reduce to a two-entry colormap: index 0 = black, index 1 = white. */
  status=AcquireImageColormap(image,2,exception);
  if (status == MagickFalse)
    return(status);
  /*
    Convert the RGB range endpoints into the image's own colorspace so the
    per-channel comparison below is meaningful.
  */
  start=(*start_color);
  stop=(*stop_color);
  switch (image->colorspace)
  {
    case HCLColorspace:
    {
      ConvertRGBToHCL(start_color->red,start_color->green,start_color->blue,
        &start.red,&start.green,&start.blue);
      ConvertRGBToHCL(stop_color->red,stop_color->green,stop_color->blue,
        &stop.red,&stop.green,&stop.blue);
      break;
    }
    case HSBColorspace:
    {
      ConvertRGBToHSB(start_color->red,start_color->green,start_color->blue,
        &start.red,&start.green,&start.blue);
      ConvertRGBToHSB(stop_color->red,stop_color->green,stop_color->blue,
        &stop.red,&stop.green,&stop.blue);
      break;
    }
    case HSLColorspace:
    {
      ConvertRGBToHSL(start_color->red,start_color->green,start_color->blue,
        &start.red,&start.green,&start.blue);
      ConvertRGBToHSL(stop_color->red,stop_color->green,stop_color->blue,
        &stop.red,&stop.green,&stop.blue);
      break;
    }
    case HSVColorspace:
    {
      ConvertRGBToHSV(start_color->red,start_color->green,start_color->blue,
        &start.red,&start.green,&start.blue);
      ConvertRGBToHSV(stop_color->red,stop_color->green,stop_color->blue,
        &stop.red,&stop.green,&stop.blue);
      break;
    }
    case HWBColorspace:
    {
      ConvertRGBToHWB(start_color->red,start_color->green,start_color->blue,
        &start.red,&start.green,&start.blue);
      ConvertRGBToHWB(stop_color->red,stop_color->green,stop_color->blue,
        &stop.red,&stop.green,&stop.blue);
      break;
    }
    case LabColorspace:
    {
      ConvertRGBToLab(start_color->red,start_color->green,start_color->blue,
        &start.red,&start.green,&start.blue);
      ConvertRGBToLab(stop_color->red,stop_color->green,stop_color->blue,
        &stop.red,&stop.green,&stop.blue);
      break;
    }
    default:
    {
      start.red*=QuantumScale;
      start.green*=QuantumScale;
      start.blue*=QuantumScale;
      stop.red*=QuantumScale;
      stop.green*=QuantumScale;
      stop.blue*=QuantumScale;
      break;
    }
  }
  /* Scale the normalized endpoints back to quantum units. */
  start.red*=QuantumRange;
  start.green*=QuantumRange;
  start.blue*=QuantumRange;
  stop.red*=QuantumRange;
  stop.green*=QuantumRange;
  stop.blue*=QuantumRange;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickBooleanType
        foreground = MagickTrue;

      register ssize_t
        i;

      /* A pixel is foreground only if every updated channel is in range. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if ((q[i] < GetPixelInfoChannel(&start,channel)) ||
            (q[i] > GetPixelInfoChannel(&stop,channel)))
          foreground=MagickFalse;
      }
      SetPixelIndex(image,(Quantum) (foreground != MagickFalse ? 1 : 0),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->colorspace=sRGBColorspace;
  return(SyncImage(image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y T h r e s h o l d M a p                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyThresholdMap() de-allocate the given ThresholdMap
%
%  The format of the ListThresholdMaps method is:
%
%      ThresholdMap *DestroyThresholdMap(Threshold *map)
%
%  A description of each parameter follows.
%
%    o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  assert(map != (ThresholdMap *) NULL);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  map=(ThresholdMap *) RelinquishMagickMemory(map);
  return(map);  /* always NULL, for the p=DestroyThresholdMap(p) idiom */
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G e t T h r e s h o l d M a p                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMap() loads and searches one or more threshold map files for the
%  map matching the given name or alias.
%
%  The format of the GetThresholdMap method is:
%
%      ThresholdMap *GetThresholdMap(const char *map_id,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o map_id:  ID of the map to look for.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  ThresholdMap
    *map;

  /* Search the built-in map first, then any configured threshold XML files. */
  map=GetThresholdMapFile(BuiltinMap,"built-in",map_id,exception);
  if (map != (ThresholdMap *) NULL)
    return(map);
#if !MAGICKCORE_ZERO_CONFIGURATION_SUPPORT
  {
    const StringInfo
      *option;

    LinkedListInfo
      *options;

    options=GetConfigureOptions(ThresholdsFilename,exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
    while (option != (const StringInfo *) NULL)
    {
      map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
        GetStringInfoPath(option),map_id,exception);
      if (map != (ThresholdMap *) NULL)
        break;  /* first match wins */
      option=(const StringInfo *) GetNextValueInLinkedList(options);
    }
    options=DestroyConfigureOptions(options);
  }
#endif
  return(map);  /* NULL when no map or alias matched */
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     G e t T h r e s h o l d M a p F i l e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMapFile() look for a given threshold map name or alias in the
%  given XML file data, and return the allocated the map when found.
%
%  The format of the ListThresholdMaps method is:
%
%      ThresholdMap *GetThresholdMap(const char *xml,const char *filename,
%        const char *map_id,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o xml:  The threshold map list in XML format.
%
%    o filename:  The threshold map XML filename.
%
%    o map_id:  ID of the map to look for in XML list.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
  const char *map_id,ExceptionInfo *exception)
{
  char
    *p;

  const char
    *attribute,
    *content;

  double
    value;

  register ssize_t
    i;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  map=(ThresholdMap *) NULL;
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(map);
  /* Locate the <threshold> element whose map or alias attribute matches. */
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold=GetNextXMLTreeTag(threshold))
  {
    attribute=GetXMLTreeAttribute(threshold,"map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold,"alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      thresholds=DestroyXMLTree(thresholds);
      return(map);  /* not found: NULL, no exception */
    }
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    Allocate the map; pointer fields are NULL-initialized immediately so
    DestroyThresholdMap() is safe on every error path below.
  */
  map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map));
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
    sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  /* Parse exactly width*height integer levels from the element content. */
  for (i=0; i < (ssize_t) (map->width*map->height); i++)
  {
    map->levels[i]=(ssize_t) strtol(content,&p,10);
    if (p == content)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
          (double) map->levels[i],map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    content=p;
  }
  /* Any trailing integer means the map declared too many values. */
  value=(double) strtol(content,&p,10);
  (void) value;
  if (p != content)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     L i s t T h r e s h o l d M a p F i l e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMapFile() lists the threshold maps and their descriptions
%  in the given XML file data.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,const char*xml,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file:  An pointer to the output FILE.
%
%    o xml:  The threshold map list in XML format.
%
%    o filename:  The threshold map XML filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias,
    *content,
    *map;

  XMLTreeInfo
    *description,
    *threshold,
    *thresholds;

  assert( xml != (char *) NULL );
  assert( file != (FILE *) NULL );
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  threshold=GetXMLTreeChild(thresholds,"threshold");
  for ( ; threshold != (XMLTreeInfo *) NULL;
          threshold=GetNextXMLTreeTag(threshold))
  {
    map=GetXMLTreeAttribute(threshold,"map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias=GetXMLTreeAttribute(threshold,"alias");  /* alias is optional */
    description=GetXMLTreeChild(threshold,"description");
    if (description == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"",map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if (content == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
      content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L i s t T h r e s h o l d M a p s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMaps() lists the threshold maps and their descriptions
%  as defined by "threshold.xml" to a file.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file:  An pointer to the output FILE.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=stdout;  /* default to standard output */
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  option=(const StringInfo *) GetNextValueInLinkedList(options);
  while (option != (const StringInfo *) NULL)
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    /* Accumulate success across all configured threshold map files. */
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O r d e r e d D i t h e r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedDitherImage() will perform an ordered dither based on a number
%  of pre-defined dithering threshold maps, but over multiple intensity
%  levels, which can be different for different channels, according to the
%  input argument.
%
%  The format of the OrderedDitherImage method is:
%
%      MagickBooleanType OrderedDitherImage(Image *image,
%        const char *threshold_map,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold_map: A string containing the name of the threshold dither
%      map to use, followed by zero or more numbers representing the number
%      of color levels to dither between.
%
%      Any level number less than 2 will be equivalent to 2, and means only
%      binary dithering will be applied to each color channel.
%
%      No numbers also means a 2 level (bitmap) dither will be applied to all
%      channels, while a single number is the number of levels applied to each
%      channel in sequence.  More numbers will be applied in turn to each of
%      the color channels.
%
%      For example: "o3x3,6" will generate a 6 level posterization of the
%      image with an ordered 3x3 diffused pixel dither being applied between
%      each level.  While checker,8,8,4 will produce a 332 colormapped image
%      with only a single checkerboard hash pattern (50% grey) between each
%      color level, to basically double the number of color levels with
%      a bare minimum of dithering.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType OrderedDitherImage(Image *image, const char *threshold_map,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; char token[MagickPathExtent]; const char *p; double levels[CompositePixelChannel]; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; ThresholdMap *map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (threshold_map == (const char *) NULL) return(MagickTrue); p=(char *) threshold_map; while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) && (*p != '\0')) p++; threshold_map=p; while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) && (*p != '\0')) { if ((p-threshold_map) >= (MagickPathExtent-1)) break; token[p-threshold_map]=(*p); p++; } token[p-threshold_map]='\0'; map=GetThresholdMap(token,exception); if (map == (ThresholdMap *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","ordered-dither",threshold_map); return(MagickFalse); } for (i=0; i < MaxPixelChannels; i++) levels[i]=2.0; p=strchr((char *) threshold_map,','); if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0)) { (void) GetNextToken(p,&p,MagickPathExtent,token); for (i=0; (i < MaxPixelChannels); i++) levels[i]=StringToDouble(token,(char **) NULL); for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); levels[i]=StringToDouble(token,(char **) NULL); } } for (i=0; i < MaxPixelChannels; i++) if (fabs(levels[i]) >= 1) levels[i]-=1.0; if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); 
status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; ssize_t n; n=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { ssize_t level, threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (fabs(levels[n]) < MagickEpsilon) { n++; continue; } threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1)); level=threshold/(map->divisor-1); threshold-=level*(map->divisor-1); q[i]=ClampToQuantum((double) (level+(threshold >= map->levels[(x % map->width)+map->width*(y % map->height)]))* QuantumRange/levels[n]); n++; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,DitherImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); map=DestroyThresholdMap(map); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P e r c e p t i b l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PerceptibleImage() set each pixel whose value is less than |epsilon| to % 
epsilon or -epsilon (whichever is closer) otherwise the pixel value remains % unchanged. % % The format of the PerceptibleImage method is: % % MagickBooleanType PerceptibleImage(Image *image,const double epsilon, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o epsilon: the epsilon threshold (e.g. 1.0e-9). % % o exception: return any errors or warnings in this structure. % */ static inline Quantum PerceptibleThreshold(const Quantum quantum, const double epsilon) { double sign; sign=(double) quantum < 0.0 ? -1.0 : 1.0; if ((sign*quantum) >= epsilon) return(quantum); return((Quantum) (sign*epsilon)); } MagickExport MagickBooleanType PerceptibleImage(Image *image, const double epsilon,ExceptionInfo *exception) { #define PerceptibleImageTag "Perceptible/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { register ssize_t i; register PixelInfo *magick_restrict q; q=image->colormap; for (i=0; i < (ssize_t) image->colors; i++) { q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red), epsilon); q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green), epsilon); q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue), epsilon); q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha), epsilon); q++; } return(SyncImage(image,exception)); } /* Perceptible image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PerceptibleThreshold(q[i],epsilon); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,PerceptibleImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n d o m T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RandomThresholdImage() changes the value of individual pixels based on the % intensity of each pixel compared to a random threshold. The result is a % low-contrast, two color image. % % The format of the RandomThresholdImage method is: % % MagickBooleanType RandomThresholdImage(Image *image, % const char *thresholds,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o low,high: Specify the high and low thresholds. These values range from % 0 to QuantumRange. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RandomThresholdImage(Image *image, const double min_threshold, const double max_threshold,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo threshold; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); GetPixelInfo(image,&threshold); /* Random threshold image. 
*/ status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if ((double) q[i] < min_threshold) threshold=min_threshold; else if ((double) q[i] > max_threshold) threshold=max_threshold; else threshold=(double) (QuantumRange* GetPseudoRandomValue(random_info[id])); q[i]=(double) q[i] <= threshold ? 
0 : QuantumRange; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n g e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RangeThresholdImage() applies soft and hard thresholding. % % The format of the RangeThresholdImage method is: % % MagickBooleanType RangeThresholdImage(Image *image, % const double low_black,const double low_white,const double high_white, % const double high_black,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o low_black: Define the minimum black threshold value. % % o low_white: Define the minimum white threshold value. % % o high_white: Define the maximum white threshold value. % % o high_black: Define the maximum black threshold value. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType RangeThresholdImage(Image *image, const double low_black,const double low_white,const double high_white, const double high_black,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); /* Range threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel < low_black) q[i]=(Quantum) 0; else if ((pixel >= low_black) && (pixel < low_white)) q[i]=ClampToQuantum(QuantumRange* PerceptibleReciprocal(low_white-low_black)*(pixel-low_black)); else if ((pixel >= low_white) && (pixel <= high_white)) q[i]=QuantumRange; else if ((pixel > high_white) && (pixel <= high_black)) 
q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal( high_black-high_white)*(high_black-pixel)); else if (pixel > high_black) q[i]=(Quantum) 0; else q[i]=(Quantum) 0; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteThresholdImage() is like ThresholdImage() but forces all pixels above % the threshold into white while leaving all pixels at or below the threshold % unchanged. % % The format of the WhiteThresholdImage method is: % % MagickBooleanType WhiteThresholdImage(Image *image, % const char *threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: Define the threshold value. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType WhiteThresholdImage(Image *image, const char *thresholds,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; PixelInfo threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (thresholds == (const char *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); GetPixelInfo(image,&threshold); flags=ParseGeometry(thresholds,&geometry_info); threshold.red=geometry_info.rho; threshold.green=geometry_info.rho; threshold.blue=geometry_info.rho; threshold.black=geometry_info.rho; threshold.alpha=100.0; if ((flags & SigmaValue) != 0) threshold.green=geometry_info.sigma; if ((flags & XiValue) != 0) threshold.blue=geometry_info.xi; if ((flags & PsiValue) != 0) threshold.alpha=geometry_info.psi; if (threshold.colorspace == CMYKColorspace) { if ((flags & PsiValue) != 0) threshold.black=geometry_info.psi; if ((flags & ChiValue) != 0) threshold.alpha=geometry_info.chi; } if ((flags & PercentValue) != 0) { threshold.red*=(MagickRealType) (QuantumRange/100.0); threshold.green*=(MagickRealType) (QuantumRange/100.0); threshold.blue*=(MagickRealType) (QuantumRange/100.0); threshold.black*=(MagickRealType) (QuantumRange/100.0); threshold.alpha*=(MagickRealType) (QuantumRange/100.0); } /* White threshold image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel > GetPixelInfoChannel(&threshold,channel)) q[i]=QuantumRange; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
/* data.c */
#include "data.h" #include "utils.h" #include "image.h" #include "dark_cuda.h" #include "box.h" #include "http_stream.h" #include <stdio.h> #include <stdlib.h> #include <string.h> extern int check_mistakes; #define NUMCHARS 37 pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; list *get_paths(char *filename) { char *path; FILE *file = fopen(filename, "r"); if(!file) file_error(filename); list *lines = make_list(); while((path=fgetl(file))){ list_insert(lines, path); } fclose(file); return lines; } /* char **get_random_paths_indexes(char **paths, int n, int m, int *indexes) { char **random_paths = calloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); for(i = 0; i < n; ++i){ int index = random_gen()%m; indexes[i] = index; random_paths[i] = paths[index]; if(i == 0) printf("%s\n", paths[index]); } pthread_mutex_unlock(&mutex); return random_paths; } */ char **get_sequential_paths(char **paths, int n, int m, int mini_batch, int augment_speed, int contrastive) { int speed = rand_int(1, augment_speed); if (speed < 1) speed = 1; char** sequentia_paths = (char**)xcalloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); //printf("n = %d, mini_batch = %d \n", n, mini_batch); unsigned int *start_time_indexes = (unsigned int *)xcalloc(mini_batch, sizeof(unsigned int)); for (i = 0; i < mini_batch; ++i) { if (contrastive && (i % 2) == 1) start_time_indexes[i] = start_time_indexes[i - 1]; else start_time_indexes[i] = random_gen() % m; //printf(" start_time_indexes[i] = %u, ", start_time_indexes[i]); } for (i = 0; i < n; ++i) { do { int time_line_index = i % mini_batch; unsigned int index = start_time_indexes[time_line_index] % m; start_time_indexes[time_line_index] += speed; //int index = random_gen() % m; sequentia_paths[i] = paths[index]; //printf(" index = %d, ", index); //if(i == 0) printf("%s\n", paths[index]); //printf(" index = %u - grp: %s \n", index, paths[index]); if (strlen(sequentia_paths[i]) <= 4) printf(" Very small path to the image: %s \n", 
sequentia_paths[i]); } while (strlen(sequentia_paths[i]) == 0); } free(start_time_indexes); pthread_mutex_unlock(&mutex); return sequentia_paths; } char **get_random_paths_custom(char **paths, int n, int m, int contrastive) { char** random_paths = (char**)xcalloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); int old_index = 0; //printf("n = %d \n", n); for(i = 0; i < n; ++i){ do { int index = random_gen() % m; if (contrastive && (i % 2 == 1)) index = old_index; else old_index = index; random_paths[i] = paths[index]; //if(i == 0) printf("%s\n", paths[index]); //printf("grp: %s\n", paths[index]); if (strlen(random_paths[i]) <= 4) printf(" Very small path to the image: %s \n", random_paths[i]); } while (strlen(random_paths[i]) == 0); } pthread_mutex_unlock(&mutex); return random_paths; } char **get_random_paths(char **paths, int n, int m) { return get_random_paths_custom(paths, n, m, 0); } char **find_replace_paths(char **paths, int n, char *find, char *replace) { char** replace_paths = (char**)xcalloc(n, sizeof(char*)); int i; for(i = 0; i < n; ++i){ char replaced[4096]; find_replace(paths[i], find, replace, replaced); replace_paths[i] = copy_string(replaced); } return replace_paths; } matrix load_image_paths_gray(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)xcalloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image(paths[i], w, h, 3); image gray = grayscale_image(im); free_image(im); im = gray; X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_paths(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)xcalloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], w, h); X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_augment_paths(char **paths, int n, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float 
exposure, int dontuse_opencv, int contrastive) { int i; matrix X; X.rows = n; X.vals = (float**)xcalloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ int size = w > h ? w : h; image im; const int img_index = (contrastive) ? (i / 2) : i; if(dontuse_opencv) im = load_image_stb_resize(paths[img_index], 0, 0, 3); else im = load_image_color(paths[img_index], 0, 0); image crop = random_augment_image(im, angle, aspect, min, max, size); int flip = use_flip ? random_gen() % 2 : 0; if (flip) flip_image(crop); random_distort_image(crop, hue, saturation, exposure); image sized = resize_image(crop, w, h); //show_image(im, "orig"); //show_image(sized, "sized"); //show_image(sized, paths[img_index]); //wait_until_press_key_cv(); //printf("w = %d, h = %d \n", sized.w, sized.h); free_image(im); free_image(crop); X.vals[i] = sized.data; X.cols = sized.h*sized.w*sized.c; } return X; } box_label *read_boxes(char *filename, int *n) { box_label* boxes = (box_label*)xcalloc(1, sizeof(box_label)); FILE *file = fopen(filename, "r"); if (!file) { printf("Can't open label file. 
(This can be normal only if you use MSCOCO): %s \n", filename); //file_error(filename); FILE* fw = fopen("bad.list", "a"); fwrite(filename, sizeof(char), strlen(filename), fw); char *new_line = "\n"; fwrite(new_line, sizeof(char), strlen(new_line), fw); fclose(fw); if (check_mistakes) { printf("\n Error in read_boxes() \n"); getchar(); } *n = 0; return boxes; } const int max_obj_img = 4000;// 30000; const int img_hash = (custom_hash(filename) % max_obj_img)*max_obj_img; //printf(" img_hash = %d, filename = %s; ", img_hash, filename); float x, y, h, w; int id; int count = 0; while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){ boxes = (box_label*)xrealloc(boxes, (count + 1) * sizeof(box_label)); boxes[count].track_id = count + img_hash; //printf(" boxes[count].track_id = %d, count = %d \n", boxes[count].track_id, count); boxes[count].id = id; boxes[count].x = x; boxes[count].y = y; boxes[count].h = h; boxes[count].w = w; boxes[count].left = x - w/2; boxes[count].right = x + w/2; boxes[count].top = y - h/2; boxes[count].bottom = y + h/2; ++count; } fclose(file); *n = count; return boxes; } void randomize_boxes(box_label *b, int n) { int i; for(i = 0; i < n; ++i){ box_label swap = b[i]; int index = random_gen()%n; b[i] = b[index]; b[index] = swap; } } void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip) { int i; for(i = 0; i < n; ++i){ if(boxes[i].x == 0 && boxes[i].y == 0) { boxes[i].x = 999999; boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } if ((boxes[i].x + boxes[i].w / 2) < 0 || (boxes[i].y + boxes[i].h / 2) < 0 || (boxes[i].x - boxes[i].w / 2) > 1 || (boxes[i].y - boxes[i].h / 2) > 1) { boxes[i].x = 999999; boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } boxes[i].left = boxes[i].left * sx - dx; boxes[i].right = boxes[i].right * sx - dx; boxes[i].top = boxes[i].top * sy - dy; boxes[i].bottom = boxes[i].bottom* sy - dy; if(flip){ float swap = boxes[i].left; 
boxes[i].left = 1. - boxes[i].right; boxes[i].right = 1. - swap; } boxes[i].left = constrain(0, 1, boxes[i].left); boxes[i].right = constrain(0, 1, boxes[i].right); boxes[i].top = constrain(0, 1, boxes[i].top); boxes[i].bottom = constrain(0, 1, boxes[i].bottom); boxes[i].x = (boxes[i].left+boxes[i].right)/2; boxes[i].y = (boxes[i].top+boxes[i].bottom)/2; boxes[i].w = (boxes[i].right - boxes[i].left); boxes[i].h = (boxes[i].bottom - boxes[i].top); boxes[i].w = constrain(0, 1, boxes[i].w); boxes[i].h = constrain(0, 1, boxes[i].h); } } void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count && i < 30; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .0 || h < .0) continue; int index = (4+classes) * i; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; if (id < classes) truth[index+id] = 1; } free(boxes); } void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .001 || h < .001) continue; int col = (int)(x*num_boxes); int row = (int)(y*num_boxes); x = x*num_boxes - col; y = y*num_boxes - row; int index = (col+row*num_boxes)*(5+classes); if (truth[index]) continue; truth[index++] = 1; if (id < classes) truth[index+id] = 1; index += classes; truth[index++] = 
x; truth[index++] = y; truth[index++] = w; truth[index++] = h; } free(boxes); } int fill_truth_detection(const char *path, int num_boxes, int truth_size, float *truth, int classes, int flip, float dx, float dy, float sx, float sy, int net_w, int net_h) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; int i; box_label *boxes = read_boxes(labelpath, &count); int min_w_h = 0; float lowest_w = 1.F / net_w; float lowest_h = 1.F / net_h; randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); if (count > num_boxes) count = num_boxes; float x, y, w, h; int id; int sub = 0; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; int track_id = boxes[i].track_id; // not detect small objects //if ((w < 0.001F || h < 0.001F)) continue; // if truth (box for object) is smaller than 1x1 pix char buff[256]; if (id >= classes) { printf("\n Wrong annotation: class_id = %d. But class_id should be [from 0 to %d], file: %s \n", id, (classes-1), labelpath); sprintf(buff, "echo %s \"Wrong annotation: class_id = %d. 
But class_id should be [from 0 to %d]\" >> bad_label.list", labelpath, id, (classes-1)); system(buff); if (check_mistakes) getchar(); ++sub; continue; } if ((w < lowest_w || h < lowest_h)) { //sprintf(buff, "echo %s \"Very small object: w < lowest_w OR h < lowest_h\" >> bad_label.list", labelpath); //system(buff); ++sub; continue; } if (x == 999999 || y == 999999) { printf("\n Wrong annotation: x = 0, y = 0, < 0 or > 1, file: %s \n", labelpath); sprintf(buff, "echo %s \"Wrong annotation: x = 0 or y = 0\" >> bad_label.list", labelpath); system(buff); ++sub; if (check_mistakes) getchar(); continue; } if (x <= 0 || x > 1 || y <= 0 || y > 1) { printf("\n Wrong annotation: x = %f, y = %f, file: %s \n", x, y, labelpath); sprintf(buff, "echo %s \"Wrong annotation: x = %f, y = %f\" >> bad_label.list", labelpath, x, y); system(buff); ++sub; if (check_mistakes) getchar(); continue; } if (w > 1) { printf("\n Wrong annotation: w = %f, file: %s \n", w, labelpath); sprintf(buff, "echo %s \"Wrong annotation: w = %f\" >> bad_label.list", labelpath, w); system(buff); w = 1; if (check_mistakes) getchar(); } if (h > 1) { printf("\n Wrong annotation: h = %f, file: %s \n", h, labelpath); sprintf(buff, "echo %s \"Wrong annotation: h = %f\" >> bad_label.list", labelpath, h); system(buff); h = 1; if (check_mistakes) getchar(); } if (x == 0) x += lowest_w; if (y == 0) y += lowest_h; truth[(i-sub)*truth_size +0] = x; truth[(i-sub)*truth_size +1] = y; truth[(i-sub)*truth_size +2] = w; truth[(i-sub)*truth_size +3] = h; truth[(i-sub)*truth_size +4] = id; truth[(i-sub)*truth_size +5] = track_id; //float val = track_id; //printf(" i = %d, sub = %d, truth_size = %d, track_id = %d, %f, %f\n", i, sub, truth_size, track_id, truth[(i - sub)*truth_size + 5], val); if (min_w_h == 0) min_w_h = w*net_w; if (min_w_h > w*net_w) min_w_h = w*net_w; if (min_w_h > h*net_h) min_w_h = h*net_h; } free(boxes); return min_w_h; } void print_letters(float *pred, int n) { int i; for(i = 0; i < n; ++i){ int index = 
max_index(pred+i*NUMCHARS, NUMCHARS); printf("%c", int_to_alphanum(index)); } printf("\n"); } void fill_truth_captcha(char *path, int n, float *truth) { char *begin = strrchr(path, '/'); ++begin; int i; for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){ int index = alphanum_to_int(begin[i]); if(index > 35) printf("Bad %c\n", begin[i]); truth[i*NUMCHARS+index] = 1; } for(;i < n; ++i){ truth[i*NUMCHARS + NUMCHARS-1] = 1; } } data load_data_captcha(char **paths, int n, int m, int k, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = make_matrix(n, k*NUMCHARS); int i; for(i = 0; i < n; ++i){ fill_truth_captcha(paths[i], k, d.y.vals[i]); } if(m) free(paths); return d; } data load_data_captcha_encode(char **paths, int n, int m, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.X.cols = 17100; d.y = d.X; if(m) free(paths); return d; } void fill_truth(char *path, char **labels, int k, float *truth) { int i; memset(truth, 0, k*sizeof(float)); int count = 0; for(i = 0; i < k; ++i){ if(strstr(path, labels[i])){ truth[i] = 1; ++count; } } if (count != 1) { printf("Too many or too few labels: %d, %s\n", count, path); count = 0; for (i = 0; i < k; ++i) { if (strstr(path, labels[i])) { printf("\t label %d: %s \n", count, labels[i]); count++; } } } } void fill_truth_smooth(char *path, char **labels, int k, float *truth, float label_smooth_eps) { int i; memset(truth, 0, k * sizeof(float)); int count = 0; for (i = 0; i < k; ++i) { if (strstr(path, labels[i])) { truth[i] = (1 - label_smooth_eps); ++count; } else { truth[i] = label_smooth_eps / (k - 1); } } if (count != 1) { printf("Too many or too few labels: %d, %s\n", count, path); count = 0; for (i = 0; i < k; ++i) { if (strstr(path, labels[i])) { printf("\t label %d: %s \n", count, labels[i]); count++; } } } } void fill_hierarchy(float *truth, int k, 
tree *hierarchy) { int j; for(j = 0; j < k; ++j){ if(truth[j]){ int parent = hierarchy->parent[j]; while(parent >= 0){ truth[parent] = 1; parent = hierarchy->parent[parent]; } } } int i; int count = 0; for(j = 0; j < hierarchy->groups; ++j){ //printf("%d\n", count); int mask = 1; for(i = 0; i < hierarchy->group_size[j]; ++i){ if(truth[count + i]){ mask = 0; break; } } if (mask) { for(i = 0; i < hierarchy->group_size[j]; ++i){ truth[count + i] = SECRET_NUM; } } count += hierarchy->group_size[j]; } } int find_max(float *arr, int size) { int i; float max = 0; int n = 0; for (i = 0; i < size; ++i) { if (arr[i] > max) { max = arr[i]; n = i; } } return n; } matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy, float label_smooth_eps, int contrastive) { matrix y = make_matrix(n, k); int i; if (labels) { // supervised learning for (i = 0; i < n; ++i) { const int img_index = (contrastive) ? (i / 2) : i; fill_truth_smooth(paths[img_index], labels, k, y.vals[i], label_smooth_eps); //printf(" n = %d, i = %d, img_index = %d, class_id = %d \n", n, i, img_index, find_max(y.vals[i], k)); if (hierarchy) { fill_hierarchy(y.vals[i], k, hierarchy); } } } else { // unsupervised learning for (i = 0; i < n; ++i) { const int img_index = (contrastive) ? 
(i / 2) : i; const uintptr_t path_p = (uintptr_t)paths[img_index];// abs(random_gen()); const int class_id = path_p % k; int l; for (l = 0; l < k; ++l) y.vals[i][l] = 0; y.vals[i][class_id] = 1; } } return y; } matrix load_tags_paths(char **paths, int n, int k) { matrix y = make_matrix(n, k); int i; int count = 0; for(i = 0; i < n; ++i){ char label[4096]; find_replace(paths[i], "imgs", "labels", label); find_replace(label, "_iconl.jpeg", ".txt", label); FILE *file = fopen(label, "r"); if(!file){ find_replace(label, "labels", "labels2", label); file = fopen(label, "r"); if(!file) continue; } ++count; int tag; while(fscanf(file, "%d", &tag) == 1){ if(tag < k){ y.vals[i][tag] = 1; } } fclose(file); } printf("%d/%d\n", count, n); return y; } char **get_labels_custom(char *filename, int *size) { list *plist = get_paths(filename); if(size) *size = plist->size; char **labels = (char **)list_to_array(plist); free_list(plist); return labels; } char **get_labels(char *filename) { return get_labels_custom(filename, NULL); } void free_data(data d) { if(!d.shallow){ free_matrix(d.X); free_matrix(d.y); }else{ free(d.X.vals); free(d.y.vals); } } data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; int k = size*size*(5+classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 0); int oh = orig.h; int ow = orig.w; int dw = (ow*jitter); int dh = (oh*jitter); int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; int flip = random_gen()%2; image cropped = 
crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft/ow)/sx; float dy = ((float)ptop /oh)/sy; image sized = resize_image(cropped, w, h); if(flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); } free(random_paths); return d; } data load_data_compare(int n, char **paths, int m, int classes, int w, int h) { if(m) paths = get_random_paths(paths, 2*n, m); int i,j; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*6; int k = 2*(classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image im1 = load_image_color(paths[i*2], w, h); image im2 = load_image_color(paths[i*2+1], w, h); d.X.vals[i] = (float*)xcalloc(d.X.cols, sizeof(float)); memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float)); memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float)); int id; float iou; char imlabel1[4096]; char imlabel2[4096]; find_replace(paths[i*2], "imgs", "labels", imlabel1); find_replace(imlabel1, "jpg", "txt", imlabel1); FILE *fp1 = fopen(imlabel1, "r"); while(fscanf(fp1, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou; } find_replace(paths[i*2+1], "imgs", "labels", imlabel2); find_replace(imlabel2, "jpg", "txt", imlabel2); FILE *fp2 = fopen(imlabel2, "r"); while(fscanf(fp2, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou; } for (j = 0; j < classes; ++j){ if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){ d.y.vals[i][2*j] = 1; d.y.vals[i][2*j+1] = 0; } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){ d.y.vals[i][2*j] = 0; d.y.vals[i][2*j+1] = 1; } else { d.y.vals[i][2*j] = SECRET_NUM; d.y.vals[i][2*j+1] = SECRET_NUM; } } fclose(fp1); fclose(fp2); free_image(im1); free_image(im2); } if(m) free(paths); return d; } data load_data_swag(char 
**paths, int n, int classes, float jitter) { int index = random_gen()%n; char *random_path = paths[index]; image orig = load_image_color(random_path, 0, 0); int h = orig.h; int w = orig.w; data d = {0}; d.shallow = 0; d.w = w; d.h = h; d.X.rows = 1; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; int k = (4+classes)*30; d.y = make_matrix(1, k); int dw = w*jitter; int dh = h*jitter; int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = w - pleft - pright; int sheight = h - ptop - pbot; float sx = (float)swidth / w; float sy = (float)sheight / h; int flip = random_gen()%2; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft/w)/sx; float dy = ((float)ptop /h)/sy; image sized = resize_image(cropped, w, h); if(flip) flip_image(sized); d.X.vals[0] = sized.data; fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); return d; } void blend_truth(float *new_truth, int boxes, int truth_size, float *old_truth) { int count_new_truth = 0; int t; for (t = 0; t < boxes; ++t) { float x = new_truth[t*truth_size]; if (!x) break; count_new_truth++; } for (t = count_new_truth; t < boxes; ++t) { float *new_truth_ptr = new_truth + t*truth_size; float *old_truth_ptr = old_truth + (t - count_new_truth)*truth_size; float x = old_truth_ptr[0]; if (!x) break; new_truth_ptr[0] = old_truth_ptr[0]; new_truth_ptr[1] = old_truth_ptr[1]; new_truth_ptr[2] = old_truth_ptr[2]; new_truth_ptr[3] = old_truth_ptr[3]; new_truth_ptr[4] = old_truth_ptr[4]; } //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t); } void blend_truth_mosaic(float *new_truth, int boxes, int truth_size, float *old_truth, int w, int h, float cut_x, float cut_y, int i_mixup, int left_shift, int right_shift, int top_shift, int bot_shift, int net_w, int net_h, int mosaic_bound) { const float lowest_w 
= 1.F / net_w; const float lowest_h = 1.F / net_h; int count_new_truth = 0; int t; for (t = 0; t < boxes; ++t) { float x = new_truth[t*truth_size]; if (!x) break; count_new_truth++; } int new_t = count_new_truth; for (t = count_new_truth; t < boxes; ++t) { float *new_truth_ptr = new_truth + new_t*truth_size; new_truth_ptr[0] = 0; float *old_truth_ptr = old_truth + (t - count_new_truth)*truth_size; float x = old_truth_ptr[0]; if (!x) break; float xb = old_truth_ptr[0]; float yb = old_truth_ptr[1]; float wb = old_truth_ptr[2]; float hb = old_truth_ptr[3]; // shift 4 images if (i_mixup == 0) { xb = xb - (float)(w - cut_x - right_shift) / w; yb = yb - (float)(h - cut_y - bot_shift) / h; } if (i_mixup == 1) { xb = xb + (float)(cut_x - left_shift) / w; yb = yb - (float)(h - cut_y - bot_shift) / h; } if (i_mixup == 2) { xb = xb - (float)(w - cut_x - right_shift) / w; yb = yb + (float)(cut_y - top_shift) / h; } if (i_mixup == 3) { xb = xb + (float)(cut_x - left_shift) / w; yb = yb + (float)(cut_y - top_shift) / h; } int left = (xb - wb / 2)*w; int right = (xb + wb / 2)*w; int top = (yb - hb / 2)*h; int bot = (yb + hb / 2)*h; if(mosaic_bound) { // fix out of Mosaic-bound float left_bound = 0, right_bound = 0, top_bound = 0, bot_bound = 0; if (i_mixup == 0) { left_bound = 0; right_bound = cut_x; top_bound = 0; bot_bound = cut_y; } if (i_mixup == 1) { left_bound = cut_x; right_bound = w; top_bound = 0; bot_bound = cut_y; } if (i_mixup == 2) { left_bound = 0; right_bound = cut_x; top_bound = cut_y; bot_bound = h; } if (i_mixup == 3) { left_bound = cut_x; right_bound = w; top_bound = cut_y; bot_bound = h; } if (left < left_bound) { //printf(" i_mixup = %d, left = %d, left_bound = %f \n", i_mixup, left, left_bound); left = left_bound; } if (right > right_bound) { //printf(" i_mixup = %d, right = %d, right_bound = %f \n", i_mixup, right, right_bound); right = right_bound; } if (top < top_bound) top = top_bound; if (bot > bot_bound) bot = bot_bound; xb = ((float)(right + left) / 
2) / w; wb = ((float)(right - left)) / w; yb = ((float)(bot + top) / 2) / h; hb = ((float)(bot - top)) / h; } else { // fix out of bound if (left < 0) { float diff = (float)left / w; xb = xb - diff / 2; wb = wb + diff; } if (right > w) { float diff = (float)(right - w) / w; xb = xb - diff / 2; wb = wb - diff; } if (top < 0) { float diff = (float)top / h; yb = yb - diff / 2; hb = hb + diff; } if (bot > h) { float diff = (float)(bot - h) / h; yb = yb - diff / 2; hb = hb - diff; } left = (xb - wb / 2)*w; right = (xb + wb / 2)*w; top = (yb - hb / 2)*h; bot = (yb + hb / 2)*h; } // leave only within the image if(left >= 0 && right <= w && top >= 0 && bot <= h && wb > 0 && wb < 1 && hb > 0 && hb < 1 && xb > 0 && xb < 1 && yb > 0 && yb < 1 && wb > lowest_w && hb > lowest_h) { new_truth_ptr[0] = xb; new_truth_ptr[1] = yb; new_truth_ptr[2] = wb; new_truth_ptr[3] = hb; new_truth_ptr[4] = old_truth_ptr[4]; new_t++; } } //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t); } #ifdef OPENCV #include "http_stream.h" data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int truth_size, int classes, int use_flip, int use_gaussian_noise, int use_blur, int use_mixup, float jitter, float resize, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int mosaic_bound, int contrastive, int contrastive_jit_flip, int contrastive_color, int show_imgs) { const int random_index = random_gen(); c = c ? 
c : 3; if (use_mixup == 2 || use_mixup == 4) { printf("\n cutmix=1 - isn't supported for Detector (use cutmix=1 only for Classifier) \n"); if (check_mistakes) getchar(); if(use_mixup == 2) use_mixup = 0; else use_mixup = 3; } if (use_mixup == 3 && letter_box) { //printf("\n Combination: letter_box=1 & mosaic=1 - isn't supported, use only 1 of these parameters \n"); //if (check_mistakes) getchar(); //exit(0); } if (random_gen() % 2 == 0) use_mixup = 0; int i; int *cut_x = NULL, *cut_y = NULL; if (use_mixup == 3) { cut_x = (int*)calloc(n, sizeof(int)); cut_y = (int*)calloc(n, sizeof(int)); const float min_offset = 0.2; // 20% for (i = 0; i < n; ++i) { cut_x[i] = rand_int(w*min_offset, w*(1 - min_offset)); cut_y[i] = rand_int(h*min_offset, h*(1 - min_offset)); } } data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*c; float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0; float resize_r1 = 0, resize_r2 = 0; float dhue = 0, dsat = 0, dexp = 0, flip = 0, blur = 0; int augmentation_calculated = 0, gaussian_noise = 0; d.y = make_matrix(n, truth_size*boxes); int i_mixup = 0; for (i_mixup = 0; i_mixup <= use_mixup; i_mixup++) { if (i_mixup) augmentation_calculated = 0; // recalculate augmentation for the 2nd sequence if(track==1) char **random_paths; if (track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed, contrastive); else random_paths = get_random_paths_custom(paths, n, m, contrastive); for (i = 0; i < n; ++i) { float *truth = (float*)xcalloc(truth_size * boxes, sizeof(float)); const char *filename = random_paths[i]; int flag = (c >= 3); mat_cv *src; src = load_image_mat_cv(filename, flag); if (src == NULL) { printf("\n Error in load_data_detection() - OpenCV \n"); fflush(stdout); if (check_mistakes) { getchar(); } continue; } int oh = get_height_mat(src); int ow = get_width_mat(src); int dw = (ow*jitter); int dh = (oh*jitter); float resize_down = resize, resize_up = resize; if 
(resize_down > 1.0) resize_down = 1 / resize_down; int min_rdw = ow*(1 - (1 / resize_down)) / 2; // < 0 int min_rdh = oh*(1 - (1 / resize_down)) / 2; // < 0 if (resize_up < 1.0) resize_up = 1 / resize_up; int max_rdw = ow*(1 - (1 / resize_up)) / 2; // > 0 int max_rdh = oh*(1 - (1 / resize_up)) / 2; // > 0 //printf(" down = %f, up = %f \n", (1 - (1 / resize_down)) / 2, (1 - (1 / resize_up)) / 2); if (!augmentation_calculated || !track) { augmentation_calculated = 1; resize_r1 = random_float(); resize_r2 = random_float(); if (!contrastive || contrastive_jit_flip || i % 2 == 0) { r1 = random_float(); r2 = random_float(); r3 = random_float(); r4 = random_float(); flip = use_flip ? random_gen() % 2 : 0; } r_scale = random_float(); if (!contrastive || contrastive_color || i % 2 == 0) { dhue = rand_uniform_strong(-hue, hue); dsat = rand_scale(saturation); dexp = rand_scale(exposure); } if (use_blur) { int tmp_blur = rand_int(0, 2); // 0 - disable, 1 - blur background, 2 - blur the whole image if (tmp_blur == 0) blur = 0; else if (tmp_blur == 1) blur = 1; else blur = use_blur; } if (use_gaussian_noise && rand_int(0, 1) == 1) gaussian_noise = use_gaussian_noise; else gaussian_noise = 0; } int pleft = rand_precalc_random(-dw, dw, r1); int pright = rand_precalc_random(-dw, dw, r2); int ptop = rand_precalc_random(-dh, dh, r3); int pbot = rand_precalc_random(-dh, dh, r4); if (resize < 1) { // downsize only pleft += rand_precalc_random(min_rdw, 0, resize_r1); pright += rand_precalc_random(min_rdw, 0, resize_r2); ptop += rand_precalc_random(min_rdh, 0, resize_r1); pbot += rand_precalc_random(min_rdh, 0, resize_r2); } else { pleft += rand_precalc_random(min_rdw, max_rdw, resize_r1); pright += rand_precalc_random(min_rdw, max_rdw, resize_r2); ptop += rand_precalc_random(min_rdh, max_rdh, resize_r1); pbot += rand_precalc_random(min_rdh, max_rdh, resize_r2); } //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh); //float 
scale = rand_precalc_random(.25, 2, r_scale); // unused currently //printf(" letter_box = %d \n", letter_box); if (letter_box) { float img_ar = (float)ow / (float)oh; float net_ar = (float)w / (float)h; float result_ar = img_ar / net_ar; //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar); if (result_ar > 1) // sheight - should be increased { float oh_tmp = ow / net_ar; float delta_h = (oh_tmp - oh)/2; ptop = ptop - delta_h; pbot = pbot - delta_h; //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot); } else // swidth - should be increased { float ow_tmp = oh * net_ar; float delta_w = (ow_tmp - ow)/2; pleft = pleft - delta_w; pright = pright - delta_w; //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright); } //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh); } // move each 2nd image to the corner - so that most of it was visible if (use_mixup == 3 && random_gen() % 2 == 0) { if (flip) { if (i_mixup == 0) pleft += pright, pright = 0, pbot += ptop, ptop = 0; if (i_mixup == 1) pright += pleft, pleft = 0, pbot += ptop, ptop = 0; if (i_mixup == 2) pleft += pright, pright = 0, ptop += pbot, pbot = 0; if (i_mixup == 3) pright += pleft, pleft = 0, ptop += pbot, pbot = 0; } else { if (i_mixup == 0) pright += pleft, pleft = 0, pbot += ptop, ptop = 0; if (i_mixup == 1) pleft += pright, pright = 0, pbot += ptop, ptop = 0; if (i_mixup == 2) pright += pleft, pleft = 0, ptop += pbot, pbot = 0; if (i_mixup == 3) pleft += pright, pright = 0, ptop += pbot, pbot = 0; } } int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; float dx = ((float)pleft / ow) / sx; float dy = ((float)ptop / oh) / sy; int min_w_h = 
fill_truth_detection(filename, boxes, truth_size, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h); //for (int z = 0; z < boxes; ++z) if(truth[z*truth_size] > 0) printf(" track_id = %f \n", truth[z*truth_size + 5]); //printf(" truth_size = %d \n", truth_size); if ((min_w_h / 8) < blur && blur > 1) blur = min_w_h / 8; // disable blur if one of the objects is too small image ai = image_data_augmentation(src, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp, gaussian_noise, blur, boxes, truth_size, truth); if (use_mixup == 0) { d.X.vals[i] = ai.data; memcpy(d.y.vals[i], truth, truth_size * boxes * sizeof(float)); } else if (use_mixup == 1) { if (i_mixup == 0) { d.X.vals[i] = ai.data; memcpy(d.y.vals[i], truth, truth_size * boxes * sizeof(float)); } else if (i_mixup == 1) { image old_img = make_empty_image(w, h, c); old_img.data = d.X.vals[i]; //show_image(ai, "new"); //show_image(old_img, "old"); //wait_until_press_key_cv(); blend_images_cv(ai, 0.5, old_img, 0.5); blend_truth(d.y.vals[i], boxes, truth_size, truth); free_image(old_img); d.X.vals[i] = ai.data; } } else if (use_mixup == 3) { if (i_mixup == 0) { image tmp_img = make_image(w, h, c); d.X.vals[i] = tmp_img.data; } if (flip) { int tmp = pleft; pleft = pright; pright = tmp; } const int left_shift = min_val_cmp(cut_x[i], max_val_cmp(0, (-pleft*w / ow))); const int top_shift = min_val_cmp(cut_y[i], max_val_cmp(0, (-ptop*h / oh))); const int right_shift = min_val_cmp((w - cut_x[i]), max_val_cmp(0, (-pright*w / ow))); const int bot_shift = min_val_cmp(h - cut_y[i], max_val_cmp(0, (-pbot*h / oh))); int k, x, y; for (k = 0; k < c; ++k) { for (y = 0; y < h; ++y) { int j = y*w + k*w*h; if (i_mixup == 0 && y < cut_y[i]) { int j_src = (w - cut_x[i] - right_shift) + (y + h - cut_y[i] - bot_shift)*w + k*w*h; memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float)); } if (i_mixup == 1 && y < cut_y[i]) { int j_src = left_shift + (y + h - cut_y[i] - bot_shift)*w + k*w*h; memcpy(&d.X.vals[i][j + 
cut_x[i]], &ai.data[j_src], (w-cut_x[i]) * sizeof(float)); } if (i_mixup == 2 && y >= cut_y[i]) { int j_src = (w - cut_x[i] - right_shift) + (top_shift + y - cut_y[i])*w + k*w*h; memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float)); } if (i_mixup == 3 && y >= cut_y[i]) { int j_src = left_shift + (top_shift + y - cut_y[i])*w + k*w*h; memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w - cut_x[i]) * sizeof(float)); } } } blend_truth_mosaic(d.y.vals[i], boxes, truth_size, truth, w, h, cut_x[i], cut_y[i], i_mixup, left_shift, right_shift, top_shift, bot_shift, w, h, mosaic_bound); free_image(ai); ai.data = d.X.vals[i]; } if (show_imgs && i_mixup == use_mixup) // delete i_mixup { image tmp_ai = copy_image(ai); char buff[1000]; //sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen()); sprintf(buff, "aug_%d_%d_%d", random_index, i, random_gen()); int t; for (t = 0; t < boxes; ++t) { box b = float_to_box_stride(d.y.vals[i] + t*truth_size, 1); if (!b.x) break; int left = (b.x - b.w / 2.)*ai.w; int right = (b.x + b.w / 2.)*ai.w; int top = (b.y - b.h / 2.)*ai.h; int bot = (b.y + b.h / 2.)*ai.h; draw_box_width(tmp_ai, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB } save_image(tmp_ai, buff); if (show_imgs == 1) { //char buff_src[1000]; //sprintf(buff_src, "src_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen()); //show_image_mat(src, buff_src); show_image(tmp_ai, buff); wait_until_press_key_cv(); } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. 
Click on window and press ESC button \n"); free_image(tmp_ai); } release_mat(&src); free(truth); } if (random_paths) free(random_paths); } return d; } #else // OPENCV void blend_images(image new_img, float alpha, image old_img, float beta) { int data_size = new_img.w * new_img.h * new_img.c; int i; #pragma omp parallel for for (i = 0; i < data_size; ++i) new_img.data[i] = new_img.data[i] * alpha + old_img.data[i] * beta; } data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int truth_size, int classes, int use_flip, int gaussian_noise, int use_blur, int use_mixup, float jitter, float resize, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int mosaic_bound, int contrastive, int contrastive_jit_flip, int contrastive_color, int show_imgs) { const int random_index = random_gen(); c = c ? c : 3; char **random_paths; char **mixup_random_paths = NULL; if(track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed, contrastive); else random_paths = get_random_paths_custom(paths, n, m, contrastive); //assert(use_mixup < 2); if (use_mixup == 2) { printf("\n cutmix=1 - isn't supported for Detector \n"); exit(0); } if (use_mixup == 3 || use_mixup == 4) { printf("\n mosaic=1 - compile Darknet with OpenCV for using mosaic=1 \n"); exit(0); } int mixup = use_mixup ? 
random_gen() % 2 : 0; //printf("\n mixup = %d \n", mixup); if (mixup) { if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed, contrastive); else mixup_random_paths = get_random_paths(paths, n, m); } int i; data d = { 0 }; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*c; float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale; float resize_r1 = 0, resize_r2 = 0; float dhue = 0, dsat = 0, dexp = 0, flip = 0; int augmentation_calculated = 0; d.y = make_matrix(n, truth_size * boxes); int i_mixup = 0; for (i_mixup = 0; i_mixup <= mixup; i_mixup++) { if (i_mixup) augmentation_calculated = 0; for (i = 0; i < n; ++i) { float *truth = (float*)xcalloc(truth_size * boxes, sizeof(float)); char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i]; image orig = load_image(filename, 0, 0, c); int oh = orig.h; int ow = orig.w; int dw = (ow*jitter); int dh = (oh*jitter); float resize_down = resize, resize_up = resize; if (resize_down > 1.0) resize_down = 1 / resize_down; int min_rdw = ow*(1 - (1 / resize_down)) / 2; int min_rdh = oh*(1 - (1 / resize_down)) / 2; if (resize_up < 1.0) resize_up = 1 / resize_up; int max_rdw = ow*(1 - (1 / resize_up)) / 2; int max_rdh = oh*(1 - (1 / resize_up)) / 2; if (!augmentation_calculated || !track) { augmentation_calculated = 1; resize_r1 = random_float(); resize_r2 = random_float(); if (!contrastive || contrastive_jit_flip || i % 2 == 0) { r1 = random_float(); r2 = random_float(); r3 = random_float(); r4 = random_float(); flip = use_flip ? 
random_gen() % 2 : 0; } r_scale = random_float(); if (!contrastive || contrastive_color || i % 2 == 0) { dhue = rand_uniform_strong(-hue, hue); dsat = rand_scale(saturation); dexp = rand_scale(exposure); } } int pleft = rand_precalc_random(-dw, dw, r1); int pright = rand_precalc_random(-dw, dw, r2); int ptop = rand_precalc_random(-dh, dh, r3); int pbot = rand_precalc_random(-dh, dh, r4); if (resize < 1) { // downsize only pleft += rand_precalc_random(min_rdw, 0, resize_r1); pright += rand_precalc_random(min_rdw, 0, resize_r2); ptop += rand_precalc_random(min_rdh, 0, resize_r1); pbot += rand_precalc_random(min_rdh, 0, resize_r2); } else { pleft += rand_precalc_random(min_rdw, max_rdw, resize_r1); pright += rand_precalc_random(min_rdw, max_rdw, resize_r2); ptop += rand_precalc_random(min_rdh, max_rdh, resize_r1); pbot += rand_precalc_random(min_rdh, max_rdh, resize_r2); } if (letter_box) { float img_ar = (float)ow / (float)oh; float net_ar = (float)w / (float)h; float result_ar = img_ar / net_ar; //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar); if (result_ar > 1) // sheight - should be increased { float oh_tmp = ow / net_ar; float delta_h = (oh_tmp - oh) / 2; ptop = ptop - delta_h; pbot = pbot - delta_h; //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot); } else // swidth - should be increased { float ow_tmp = oh * net_ar; float delta_w = (ow_tmp - ow) / 2; pleft = pleft - delta_w; pright = pright - delta_w; //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright); } } int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft / ow) / sx; float dy = ((float)ptop / oh) / sy; image sized = 
resize_image(cropped, w, h); if (flip) flip_image(sized); distort_image(sized, dhue, dsat, dexp); //random_distort_image(sized, hue, saturation, exposure); fill_truth_detection(filename, boxes, truth_size, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h); if (i_mixup) { image old_img = sized; old_img.data = d.X.vals[i]; //show_image(sized, "new"); //show_image(old_img, "old"); //wait_until_press_key_cv(); blend_images(sized, 0.5, old_img, 0.5); blend_truth(truth, boxes, truth_size, d.y.vals[i]); free_image(old_img); } d.X.vals[i] = sized.data; memcpy(d.y.vals[i], truth, truth_size * boxes * sizeof(float)); if (show_imgs)// && i_mixup) { char buff[1000]; sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg(filename), random_gen()); int t; for (t = 0; t < boxes; ++t) { box b = float_to_box_stride(d.y.vals[i] + t*truth_size, 1); if (!b.x) break; int left = (b.x - b.w / 2.)*sized.w; int right = (b.x + b.w / 2.)*sized.w; int top = (b.y - b.h / 2.)*sized.h; int bot = (b.y + b.h / 2.)*sized.h; draw_box_width(sized, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB } save_image(sized, buff); if (show_imgs == 1) { show_image(sized, buff); wait_until_press_key_cv(); } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. 
Press Enter: \n"); //getchar(); } free_image(orig); free_image(cropped); free(truth); } } free(random_paths); if (mixup_random_paths) free(mixup_random_paths); return d; } #endif // OPENCV void *load_thread(void *ptr) { //srand(time(0)); //printf("Loading data: %d\n", random_gen()); load_args a = *(struct load_args*)ptr; if(a.exposure == 0) a.exposure = 1; if(a.saturation == 0) a.saturation = 1; if(a.aspect == 0) a.aspect = 1; if (a.type == OLD_CLASSIFICATION_DATA){ *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h); } else if (a.type == CLASSIFICATION_DATA){ *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.mixup, a.blur, a.show_imgs, a.label_smooth_eps, a.dontuse_opencv, a.contrastive); } else if (a.type == SUPER_DATA){ *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale); } else if (a.type == WRITING_DATA){ *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h); } else if (a.type == REGION_DATA){ *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure); } else if (a.type == DETECTION_DATA){ *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.c, a.num_boxes, a.truth_size, a.classes, a.flip, a.gaussian_noise, a.blur, a.mixup, a.jitter, a.resize, a.hue, a.saturation, a.exposure, a.mini_batch, a.track, a.augment_speed, a.letter_box, a.mosaic_bound, a.contrastive, a.contrastive_jit_flip, a.contrastive_color, a.show_imgs); } else if (a.type == SWAG_DATA){ *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter); } else if (a.type == COMPARE_DATA){ *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h); } else if (a.type == IMAGE_DATA){ *(a.im) = load_image(a.path, 0, 0, a.c); *(a.resized) = resize_image(*(a.im), a.w, a.h); }else if (a.type == LETTERBOX_DATA) { *(a.im) = load_image(a.path, 0, 0, a.c); *(a.resized) = 
letterbox_image(*(a.im), a.w, a.h); } else if (a.type == TAG_DATA){ *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } free(ptr); return 0; } pthread_t load_data_in_thread(load_args args) { pthread_t thread; struct load_args* ptr = (load_args*)xcalloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed"); return thread; } static const int thread_wait_ms = 5; static volatile int flag_exit; static volatile int * run_load_data = NULL; static load_args * args_swap = NULL; static pthread_t* threads = NULL; pthread_mutex_t mtx_load_data = PTHREAD_MUTEX_INITIALIZER; void *run_thread_loop(void *ptr) { const int i = *(int *)ptr; while (!custom_atomic_load_int(&flag_exit)) { while (!custom_atomic_load_int(&run_load_data[i])) { if (custom_atomic_load_int(&flag_exit)) { free(ptr); return 0; } this_thread_sleep_for(thread_wait_ms); } pthread_mutex_lock(&mtx_load_data); load_args *args_local = (load_args *)xcalloc(1, sizeof(load_args)); *args_local = args_swap[i]; pthread_mutex_unlock(&mtx_load_data); load_thread(args_local); custom_atomic_store_int(&run_load_data[i], 0); } free(ptr); return 0; } void *load_threads(void *ptr) { //srand(time(0)); int i; load_args args = *(load_args *)ptr; if (args.threads == 0) args.threads = 1; data *out = args.d; int total = args.n; free(ptr); data* buffers = (data*)xcalloc(args.threads, sizeof(data)); if (!threads) { threads = (pthread_t*)xcalloc(args.threads, sizeof(pthread_t)); run_load_data = (volatile int *)xcalloc(args.threads, sizeof(int)); args_swap = (load_args *)xcalloc(args.threads, sizeof(load_args)); fprintf(stderr, " Create %d permanent cpu-threads \n", args.threads); for (i = 0; i < args.threads; ++i) { int* ptr = (int*)xcalloc(1, sizeof(int)); *ptr = i; if (pthread_create(&threads[i], 0, run_thread_loop, ptr)) error("Thread creation failed"); } } for (i = 0; i < 
args.threads; ++i) { args.d = buffers + i; args.n = (i + 1) * total / args.threads - i * total / args.threads; pthread_mutex_lock(&mtx_load_data); args_swap[i] = args; pthread_mutex_unlock(&mtx_load_data); custom_atomic_store_int(&run_load_data[i], 1); // run thread } for (i = 0; i < args.threads; ++i) { while (custom_atomic_load_int(&run_load_data[i])) this_thread_sleep_for(thread_wait_ms); // join } /* pthread_t* threads = (pthread_t*)xcalloc(args.threads, sizeof(pthread_t)); for(i = 0; i < args.threads; ++i){ args.d = buffers + i; args.n = (i+1) * total/args.threads - i * total/args.threads; threads[i] = load_data_in_thread(args); } for(i = 0; i < args.threads; ++i){ pthread_join(threads[i], 0); } */ *out = concat_datas(buffers, args.threads); out->shallow = 0; for(i = 0; i < args.threads; ++i){ buffers[i].shallow = 1; free_data(buffers[i]); } free(buffers); //free(threads); return 0; } void free_load_threads(void *ptr) { load_args args = *(load_args *)ptr; if (args.threads == 0) args.threads = 1; int i; if (threads) { custom_atomic_store_int(&flag_exit, 1); for (i = 0; i < args.threads; ++i) { pthread_join(threads[i], 0); } free((void*)run_load_data); free(args_swap); free(threads); threads = NULL; custom_atomic_store_int(&flag_exit, 0); } } pthread_t load_data(load_args args) { pthread_t thread; struct load_args* ptr = (load_args*)xcalloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed"); return thread; } data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h) { if(m) paths = get_random_paths(paths, n, m); char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png"); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = load_image_paths_gray(replace_paths, n, out_w, out_h); if(m) free(paths); int i; for(i = 0; i < n; ++i) free(replace_paths[i]); free(replace_paths); return d; } data load_data_old(char **paths, int n, int 
m, char **labels, int k, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = load_labels_paths(paths, n, labels, k, 0, 0, 0); if(m) free(paths); return d; } /* data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { data d = {0}; d.indexes = calloc(n, sizeof(int)); if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes); d.shallow = 0; d.X = load_image_augment_paths(paths, n, flip, min, max, size, angle, aspect, hue, saturation, exposure); d.y = load_labels_paths(paths, n, labels, k); if(m) free(paths); return d; } */ data load_data_super(char **paths, int n, int m, int w, int h, int scale) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; int i; d.X.rows = n; d.X.vals = (float**)xcalloc(n, sizeof(float*)); d.X.cols = w*h*3; d.y.rows = n; d.y.vals = (float**)xcalloc(n, sizeof(float*)); d.y.cols = w*scale * h*scale * 3; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], 0, 0); image crop = random_crop_image(im, w*scale, h*scale); int flip = random_gen()%2; if (flip) flip_image(crop); image resize = resize_image(crop, w, h); d.X.vals[i] = resize.data; d.y.vals[i] = crop.data; free_image(im); } if(m) free(paths); return d; } data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure, int use_mixup, int use_blur, int show_imgs, float label_smooth_eps, int dontuse_opencv, int contrastive) { char **paths_stored = paths; if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv, contrastive); d.y = load_labels_paths(paths, n, labels, k, hierarchy, 
label_smooth_eps, contrastive); if (use_mixup && rand_int(0, 1)) { char **paths_mix = get_random_paths(paths_stored, n, m); data d2 = { 0 }; d2.shallow = 0; d2.X = load_image_augment_paths(paths_mix, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv, contrastive); d2.y = load_labels_paths(paths_mix, n, labels, k, hierarchy, label_smooth_eps, contrastive); free(paths_mix); data d3 = { 0 }; d3.shallow = 0; data d4 = { 0 }; d4.shallow = 0; if (use_mixup >= 3) { char **paths_mix3 = get_random_paths(paths_stored, n, m); d3.X = load_image_augment_paths(paths_mix3, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv, contrastive); d3.y = load_labels_paths(paths_mix3, n, labels, k, hierarchy, label_smooth_eps, contrastive); free(paths_mix3); char **paths_mix4 = get_random_paths(paths_stored, n, m); d4.X = load_image_augment_paths(paths_mix4, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv, contrastive); d4.y = load_labels_paths(paths_mix4, n, labels, k, hierarchy, label_smooth_eps, contrastive); free(paths_mix4); } // mix int i, j; for (i = 0; i < d2.X.rows; ++i) { int mixup = use_mixup; if (use_mixup == 4) mixup = rand_int(2, 3); // alternate CutMix and Mosaic // MixUp ----------------------------------- if (mixup == 1) { // mix images for (j = 0; j < d2.X.cols; ++j) { d.X.vals[i][j] = (d.X.vals[i][j] + d2.X.vals[i][j]) / 2.0f; } // mix labels for (j = 0; j < d2.y.cols; ++j) { d.y.vals[i][j] = (d.y.vals[i][j] + d2.y.vals[i][j]) / 2.0f; } } // CutMix ----------------------------------- else if (mixup == 2) { const float min = 0.3; // 0.3*0.3 = 9% const float max = 0.8; // 0.8*0.8 = 64% const int cut_w = rand_int(w*min, w*max); const int cut_h = rand_int(h*min, h*max); const int cut_x = rand_int(0, w - cut_w - 1); const int cut_y = rand_int(0, h - cut_h - 1); const int left = cut_x; const int right = cut_x + cut_w; const int top = cut_y; const int bot = cut_y + cut_h; 
assert(cut_x >= 0 && cut_x <= w); assert(cut_y >= 0 && cut_y <= h); assert(cut_w >= 0 && cut_w <= w); assert(cut_h >= 0 && cut_h <= h); assert(right >= 0 && right <= w); assert(bot >= 0 && bot <= h); assert(top <= bot); assert(left <= right); const float alpha = (float)(cut_w*cut_h) / (float)(w*h); const float beta = 1 - alpha; int c, x, y; for (c = 0; c < 3; ++c) { for (y = top; y < bot; ++y) { for (x = left; x < right; ++x) { int j = x + y*w + c*w*h; d.X.vals[i][j] = d2.X.vals[i][j]; } } } //printf("\n alpha = %f, beta = %f \n", alpha, beta); // mix labels for (j = 0; j < d.y.cols; ++j) { d.y.vals[i][j] = d.y.vals[i][j] * beta + d2.y.vals[i][j] * alpha; } } // Mosaic ----------------------------------- else if (mixup == 3) { const float min_offset = 0.2; // 20% const int cut_x = rand_int(w*min_offset, w*(1 - min_offset)); const int cut_y = rand_int(h*min_offset, h*(1 - min_offset)); float s1 = (float)(cut_x * cut_y) / (w*h); float s2 = (float)((w - cut_x) * cut_y) / (w*h); float s3 = (float)(cut_x * (h - cut_y)) / (w*h); float s4 = (float)((w - cut_x) * (h - cut_y)) / (w*h); int c, x, y; for (c = 0; c < 3; ++c) { for (y = 0; y < h; ++y) { for (x = 0; x < w; ++x) { int j = x + y*w + c*w*h; if (x < cut_x && y < cut_y) d.X.vals[i][j] = d.X.vals[i][j]; if (x >= cut_x && y < cut_y) d.X.vals[i][j] = d2.X.vals[i][j]; if (x < cut_x && y >= cut_y) d.X.vals[i][j] = d3.X.vals[i][j]; if (x >= cut_x && y >= cut_y) d.X.vals[i][j] = d4.X.vals[i][j]; } } } for (j = 0; j < d.y.cols; ++j) { const float max_s = 1;// max_val_cmp(s1, max_val_cmp(s2, max_val_cmp(s3, s4))); d.y.vals[i][j] = d.y.vals[i][j] * s1 / max_s + d2.y.vals[i][j] * s2 / max_s + d3.y.vals[i][j] * s3 / max_s + d4.y.vals[i][j] * s4 / max_s; } } } free_data(d2); if (use_mixup >= 3) { free_data(d3); free_data(d4); } } #ifdef OPENCV if (use_blur) { int i; for (i = 0; i < d.X.rows; ++i) { if (random_gen() % 4 == 0) { image im = make_empty_image(w, h, 3); im.data = d.X.vals[i]; int ksize = use_blur; if (use_blur == 1) 
ksize = 15;
                image blurred = blur_image(im, ksize);
                free_image(im);
                d.X.vals[i] = blurred.data;
                //if (i == 0) {
                //    show_image(im, "Not blurred");
                //    show_image(blurred, "blurred");
                //    wait_until_press_key_cv();
                //}
            }
        }
    }
#endif // OPENCV

    // Debug aid: save every augmented image as aug_<i>_<name>_<rand> and
    // print its non-zero labels; show_imgs == 1 additionally displays each
    // image in a window and waits for a key press.
    if (show_imgs) {
        int i, j;
        for (i = 0; i < d.X.rows; ++i) {
            image im = make_empty_image(w, h, 3);
            im.data = d.X.vals[i];
            char buff[1000];
            sprintf(buff, "aug_%d_%s_%d", i, basecfg((char*)paths[i]), random_gen());
            save_image(im, buff);

            char buff_string[1000];
            sprintf(buff_string, "\n Classes: ");
            // NOTE(review): strcat into a fixed 1000-byte buffer can overflow
            // if many labels are set for one sample - confirm the label count
            // stays small in practice.
            for (j = 0; j < d.y.cols; ++j) {
                if (d.y.vals[i][j] > 0) {
                    char buff_tmp[100];
                    sprintf(buff_tmp, " %d (%f), ", j, d.y.vals[i][j]);
                    strcat(buff_string, buff_tmp);
                }
            }
            printf("%s \n", buff_string);
            if (show_imgs == 1) {
                show_image(im, buff);
                wait_until_press_key_cv();
            }
        }
        printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n");
    }
    if (m) free(paths);
    return d;
}

// Load n images (random choice of m paths when m != 0) with augmentation,
// paired with their multi-label "tag" truth vectors of length k.
data load_data_tag(char **paths, int n, int m, int k, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.w = w;
    d.h = h;
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, 0, 0);
    d.y = load_tags_paths(paths, n, k);
    if(m) free(paths);
    return d;
}

// Stack m2's rows below m1's. Only the row-pointer array is newly
// allocated: the rows themselves are shared with the inputs.
matrix concat_matrix(matrix m1, matrix m2)
{
    int i, count = 0;
    matrix m;
    m.cols = m1.cols;
    m.rows = m1.rows+m2.rows;
    m.vals = (float**)xcalloc(m1.rows + m2.rows, sizeof(float*));
    for(i = 0; i < m1.rows; ++i){
        m.vals[count++] = m1.vals[i];
    }
    for(i = 0; i < m2.rows; ++i){
        m.vals[count++] = m2.vals[i];
    }
    return m;
}

// Concatenate two datasets; result is shallow (rows shared with inputs).
data concat_data(data d1, data d2)
{
    data d = {0};
    d.shallow = 1;
    d.X = concat_matrix(d1.X, d2.X);
    d.y = concat_matrix(d1.y, d2.y);
    return d;
}

// Fold n datasets into one. Each intermediate shallow wrapper is freed as
// the accumulator grows.
data concat_datas(data *d, int n)
{
    int i;
    data out = {0};
    for(i = 0; i < n; ++i){
        data newdata = concat_data(d[i], out);
        free_data(out);
out = newdata; } return out; } data load_categorical_data_csv(char *filename, int target, int k) { data d = {0}; d.shallow = 0; matrix X = csv_to_matrix(filename); float *truth_1d = pop_column(&X, target); float **truth = one_hot_encode(truth_1d, X.rows, k); matrix y; y.rows = X.rows; y.cols = k; y.vals = truth; d.X = X; d.y = y; free(truth_1d); return d; } data load_cifar10_data(char *filename) { data d = {0}; d.shallow = 0; long i,j; matrix X = make_matrix(10000, 3072); matrix y = make_matrix(10000, 10); d.X = X; d.y = y; FILE *fp = fopen(filename, "rb"); if(!fp) file_error(filename); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i][j] = (double)bytes[j+1]; } } //translate_data_rows(d, -128); scale_data_rows(d, 1./255); //normalize_data_rows(d); fclose(fp); return d; } void get_random_batch(data d, int n, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = random_gen()%d.X.rows; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void get_next_batch(data d, int n, int offset, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = offset + j; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void smooth_data(data d) { int i, j; float scale = 1. 
/ d.y.cols; float eps = .1; for(i = 0; i < d.y.rows; ++i){ for(j = 0; j < d.y.cols; ++j){ d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j]; } } } data load_all_cifar10() { data d = {0}; d.shallow = 0; int i,j,b; matrix X = make_matrix(50000, 3072); matrix y = make_matrix(50000, 10); d.X = X; d.y = y; for(b = 0; b < 5; ++b){ char buff[256]; sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1); FILE *fp = fopen(buff, "rb"); if(!fp) file_error(buff); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i+b*10000][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i+b*10000][j] = (double)bytes[j+1]; } } fclose(fp); } //normalize_data_rows(d); //translate_data_rows(d, -128); scale_data_rows(d, 1./255); smooth_data(d); return d; } data load_go(char *filename) { FILE *fp = fopen(filename, "rb"); matrix X = make_matrix(3363059, 361); matrix y = make_matrix(3363059, 361); int row, col; if(!fp) file_error(filename); char *label; int count = 0; while((label = fgetl(fp))){ int i; if(count == X.rows){ X = resize_matrix(X, count*2); y = resize_matrix(y, count*2); } sscanf(label, "%d %d", &row, &col); char *board = fgetl(fp); int index = row*19 + col; y.vals[count][index] = 1; for(i = 0; i < 19*19; ++i){ float val = 0; if(board[i] == '1') val = 1; else if(board[i] == '2') val = -1; X.vals[count][i] = val; } ++count; free(label); free(board); } X = resize_matrix(X, count); y = resize_matrix(y, count); data d = {0}; d.shallow = 0; d.X = X; d.y = y; fclose(fp); return d; } void randomize_data(data d) { int i; for(i = d.X.rows-1; i > 0; --i){ int index = random_gen()%i; float *swap = d.X.vals[index]; d.X.vals[index] = d.X.vals[i]; d.X.vals[i] = swap; swap = d.y.vals[index]; d.y.vals[index] = d.y.vals[i]; d.y.vals[i] = swap; } } void scale_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ scale_array(d.X.vals[i], d.X.cols, s); } } void translate_data_rows(data d, float s) { int i; 
for(i = 0; i < d.X.rows; ++i){
        translate_array(d.X.vals[i], d.X.cols, s);
    }
}

// Normalize every row of d.X in place (see normalize_array).
void normalize_data_rows(data d)
{
    int i;
    for(i = 0; i < d.X.rows; ++i){
        normalize_array(d.X.vals[i], d.X.cols);
    }
}

// Return a shallow view of partition `part` out of `total` near-equal row
// partitions; the rounding scheme makes the parts cover all rows exactly
// once with no overlap.
data get_data_part(data d, int part, int total)
{
    data p = {0};
    p.shallow = 1;
    p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total;
    p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total;
    p.X.cols = d.X.cols;
    p.y.cols = d.y.cols;
    p.X.vals = d.X.vals + d.X.rows * part / total;
    p.y.vals = d.y.vals + d.y.rows * part / total;
    return p;
}

// Sample num rows of d with replacement. The result is shallow: the new
// row-pointer arrays are owned by the caller but the rows are shared with d.
data get_random_data(data d, int num)
{
    data r = {0};
    r.shallow = 1;
    r.X.rows = num;
    r.y.rows = num;
    r.X.cols = d.X.cols;
    r.y.cols = d.y.cols;
    r.X.vals = (float**)xcalloc(num, sizeof(float*));
    r.y.vals = (float**)xcalloc(num, sizeof(float*));
    int i;
    for(i = 0; i < num; ++i){
        int index = random_gen()%d.X.rows;
        r.X.vals[i] = d.X.vals[index];
        r.y.vals[i] = d.y.vals[index];
    }
    return r;
}

// Split d into {train, test}: rows [start, end) (fold `part` of `total`)
// become the test set, everything else the train set. Both are shallow
// views sharing rows with d; the returned 2-element array is heap-allocated
// and owned by the caller.
data *split_data(data d, int part, int total)
{
    data* split = (data*)xcalloc(2, sizeof(data));
    int i;
    int start = part*d.X.rows/total;
    int end = (part+1)*d.X.rows/total;
    data train ={0};
    data test ={0};
    train.shallow = test.shallow = 1;
    test.X.rows = test.y.rows = end-start;
    train.X.rows = train.y.rows = d.X.rows - (end-start);
    train.X.cols = test.X.cols = d.X.cols;
    train.y.cols = test.y.cols = d.y.cols;

    train.X.vals = (float**)xcalloc(train.X.rows, sizeof(float*));
    test.X.vals = (float**)xcalloc(test.X.rows, sizeof(float*));
    train.y.vals = (float**)xcalloc(train.y.rows, sizeof(float*));
    test.y.vals = (float**)xcalloc(test.y.rows, sizeof(float*));

    // Rows before the fold go to train ...
    for(i = 0; i < start; ++i){
        train.X.vals[i] = d.X.vals[i];
        train.y.vals[i] = d.y.vals[i];
    }
    // ... the fold itself to test ...
    for(i = start; i < end; ++i){
        test.X.vals[i-start] = d.X.vals[i];
        test.y.vals[i-start] = d.y.vals[i];
    }
    // ... and rows after the fold back to train, shifted down.
    for(i = end; i < d.X.rows; ++i){
        train.X.vals[i-(end-start)] = d.X.vals[i];
        train.y.vals[i-(end-start)] = d.y.vals[i];
    }
    split[0] = train;
    split[1] = test;
    return split;
}
GB_unaryop__lnot_uint8_int64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint8_int64
// op(A') function:  GB_tran__lnot_uint8_int64

// C type:   uint8_t
// A type:   int64_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical NOT: nonzero -> 0, zero -> 1)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */      \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;    \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_uint8_int64
(
    uint8_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // each entry is independent, so the loop is parallelized over anz entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_uint8_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__minv_int32_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_int32_uint64
// op(A') function:  GB_tran__minv_int32_uint64

// C type:   int32_t
// A type:   uint64_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = GB_IMINV_SIGNED (aij, 32)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (signed integer multiplicative inverse; see GB_IMINV_SIGNED)
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 32) ;

// casting
#define GB_CASTING(z, x) \
    int32_t z = (int32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */      \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;    \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_int32_uint64
(
    int32_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // each entry is independent, so the loop is parallelized over anz entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_int32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/DependenceFlags.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/Specifiers.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/BitmaskEnum.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class AddrLabelExpr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; class WhereClause; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
/// class alignas(void *) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: friend class ASTStmtReader; friend class ASTStmtWriter; void *operator new(size_t bytes) noexcept { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) noexcept { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } //===--- Statement bitfields classes ---===// class StmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class Stmt; /// The statement class. unsigned sClass : 8; /// The checked scope specifier for the statement. unsigned CSS : 2; }; enum { NumStmtBits = 10 }; class NullStmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class NullStmt; unsigned : NumStmtBits; /// True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode unsigned HasLeadingEmptyMacro : 1; /// The location of the semi-colon. SourceLocation SemiLoc; }; class CompoundStmtBitfields { friend class ASTStmtReader; friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; /// The location of the opening "{". SourceLocation LBraceLoc; }; class LabelStmtBitfields { friend class LabelStmt; unsigned : NumStmtBits; SourceLocation IdentLoc; }; class AttributedStmtBitfields { friend class ASTStmtReader; friend class AttributedStmt; unsigned : NumStmtBits; /// Number of attributes. unsigned NumAttrs : 32 - NumStmtBits; /// The location of the attribute. 
SourceLocation AttrLoc; }; class IfStmtBitfields { friend class ASTStmtReader; friend class IfStmt; unsigned : NumStmtBits; /// True if this if statement is a constexpr if. unsigned IsConstexpr : 1; /// True if this if statement has storage for an else statement. unsigned HasElse : 1; /// True if this if statement has storage for a variable declaration. unsigned HasVar : 1; /// True if this if statement has storage for an init statement. unsigned HasInit : 1; /// The location of the "if". SourceLocation IfLoc; }; class SwitchStmtBitfields { friend class SwitchStmt; unsigned : NumStmtBits; /// True if the SwitchStmt has storage for an init statement. unsigned HasInit : 1; /// True if the SwitchStmt has storage for a condition variable. unsigned HasVar : 1; /// If the SwitchStmt is a switch on an enum value, records whether all /// the enum values were covered by CaseStmts. The coverage information /// value is meant to be a hint for possible clients. unsigned AllEnumCasesCovered : 1; /// The location of the "switch". SourceLocation SwitchLoc; }; class WhileStmtBitfields { friend class ASTStmtReader; friend class WhileStmt; unsigned : NumStmtBits; /// True if the WhileStmt has storage for a condition variable. unsigned HasVar : 1; /// The location of the "while". SourceLocation WhileLoc; }; class DoStmtBitfields { friend class DoStmt; unsigned : NumStmtBits; /// The location of the "do". SourceLocation DoLoc; }; class ForStmtBitfields { friend class ForStmt; unsigned : NumStmtBits; /// The location of the "for". SourceLocation ForLoc; }; class GotoStmtBitfields { friend class GotoStmt; friend class IndirectGotoStmt; unsigned : NumStmtBits; /// The location of the "goto". SourceLocation GotoLoc; }; class ContinueStmtBitfields { friend class ContinueStmt; unsigned : NumStmtBits; /// The location of the "continue". SourceLocation ContinueLoc; }; class BreakStmtBitfields { friend class BreakStmt; unsigned : NumStmtBits; /// The location of the "break". 
SourceLocation BreakLoc; }; class ReturnStmtBitfields { friend class ReturnStmt; unsigned : NumStmtBits; /// True if this ReturnStmt has storage for an NRVO candidate. unsigned HasNRVOCandidate : 1; /// The location of the "return". SourceLocation RetLoc; }; class SwitchCaseBitfields { friend class SwitchCase; friend class CaseStmt; unsigned : NumStmtBits; /// Used by CaseStmt to store whether it is a case statement /// of the form case LHS ... RHS (a GNU extension). unsigned CaseStmtIsGNURange : 1; /// The location of the "case" or "default" keyword. SourceLocation KeywordLoc; }; //===--- Expression bitfields classes ---===// class ExprBitfields { friend class ASTStmtReader; // deserialization friend class AtomicExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class CallExpr; // ctor friend class CXXConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class CXXNewExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class DeclRefExpr; // computeDependence friend class DependentScopeDeclRefExpr; // ctor friend class DesignatedInitExpr; // ctor friend class Expr; friend class InitListExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ObjCMessageExpr; // ctor friend class OffsetOfExpr; // ctor friend class OpaqueValueExpr; // ctor friend class OverloadExpr; // ctor friend class ParenListExpr; // ctor friend class PseudoObjectExpr; // ctor friend class ShuffleVectorExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 3; unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>; }; enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> }; class ConstantExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class ConstantExpr; unsigned : NumExprBits; /// The kind of result that is tail-allocated. unsigned ResultKind : 2; /// The kind of Result as defined by APValue::Kind. 
unsigned APValueKind : 4; /// When ResultKind == RSK_Int64, true if the tail-allocated integer is /// unsigned. unsigned IsUnsigned : 1; /// When ResultKind == RSK_Int64. the BitWidth of the tail-allocated /// integer. 7 bits because it is the minimal number of bits to represent a /// value from 0 to 64 (the size of the tail-allocated integer). unsigned BitWidth : 7; /// When ResultKind == RSK_APValue, true if the ASTContext will cleanup the /// tail-allocated APValue. unsigned HasCleanup : 1; /// True if this ConstantExpr was created for immediate invocation. unsigned IsImmediateInvocation : 1; }; class PredefinedExprBitfields { friend class ASTStmtReader; friend class PredefinedExpr; unsigned : NumExprBits; /// The kind of this PredefinedExpr. One of the enumeration values /// in PredefinedExpr::IdentKind. unsigned Kind : 4; /// True if this PredefinedExpr has a trailing "StringLiteral *" /// for the predefined identifier. unsigned HasFunctionName : 1; /// The location of this PredefinedExpr. SourceLocation Loc; }; class DeclRefExprBitfields { friend class ASTStmtReader; // deserialization friend class DeclRefExpr; unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingVariableOrCapture : 1; unsigned NonOdrUseReason : 2; /// The location of the declaration name itself. SourceLocation Loc; }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class StringLiteralBitfields { friend class ASTStmtReader; friend class StringLiteral; unsigned : NumExprBits; /// The kind of this string literal. /// One of the enumeration values of StringLiteral::StringKind. unsigned Kind : 3; /// The width of a single character in bytes. Only values of 1, 2, /// and 4 bytes are supported. 
StringLiteral::mapCharByteWidth maps /// the target + string kind to the appropriate CharByteWidth. unsigned CharByteWidth : 3; unsigned IsPascal : 1; /// The number of concatenated token this string is made of. /// This is the number of trailing SourceLocation. unsigned NumConcatenated; }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 3; }; enum { NumBoundsCheckKindBits = 2 }; class UnaryOperatorBitfields { friend class UnaryOperator; unsigned : NumExprBits; unsigned Opc : 5; unsigned CanOverflow : 1; // /// This is only meaningful for operations on floating point /// types when additional values need to be in trailing storage. /// It is 0 otherwise. unsigned HasFPFeatures : 1; unsigned BoundsCheckKind : NumBoundsCheckKindBits; SourceLocation Loc; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 3; unsigned IsType : 1; // true if operand is a type, false if an expression. }; class ArrayOrMatrixSubscriptExprBitfields { friend class ArraySubscriptExpr; friend class MatrixSubscriptExpr; unsigned : NumExprBits; unsigned BoundsCheckKind : NumBoundsCheckKindBits; SourceLocation RBracketLoc; }; class ArraySubscriptExprBitfields { friend class ArraySubscriptExpr; unsigned : NumExprBits; unsigned BoundsCheckKind : NumBoundsCheckKindBits; SourceLocation RBracketLoc; }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; /// True if the callee of the call expression was found using ADL. unsigned UsesADL : 1; /// True if the call expression has some floating-point features. unsigned HasFPFeatures : 1; /// Padding used to align OffsetToTrailingObjects to a byte multiple. unsigned : 24 - 3 - NumExprBits; /// The offset in bytes from the this pointer to the start of the /// trailing objects belonging to CallExpr. Intentionally byte sized /// for faster access. 
unsigned OffsetToTrailingObjects : 8; }; enum { NumCallExprBits = 32 }; class MemberExprBitfields { friend class ASTStmtReader; friend class MemberExpr; unsigned : NumExprBits; /// IsArrow - True if this is "X->F", false if this is "X.F". unsigned IsArrow : 1; /// True if this member expression used a nested-name-specifier to /// refer to the member, e.g., "x->Base::f", or found its member via /// a using declaration. When true, a MemberExprNameQualifier /// structure is allocated immediately after the MemberExpr. unsigned HasQualifierOrFoundDecl : 1; /// True if this member expression specified a template keyword /// and/or a template argument list explicitly, e.g., x->f<int>, /// x->template f, x->template f<int>. /// When true, an ASTTemplateKWAndArgsInfo structure and its /// TemplateArguments (if any) are present. unsigned HasTemplateKWAndArgsInfo : 1; /// True if this member expression refers to a method that /// was resolved from an overloaded set having size greater than 1. unsigned HadMultipleCandidates : 1; /// Value of type NonOdrUseReason indicating why this MemberExpr does /// not constitute an odr-use of the named declaration. Meaningful only /// when naming a static member. unsigned NonOdrUseReason : 2; /// This is the location of the -> or . in the expression. SourceLocation OperatorLoc; }; class CastExprBitfields { friend class CastExpr; friend class ImplicitCastExpr; unsigned : NumExprBits; unsigned Kind : 7; unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr. unsigned BoundsSafeInterface : 1; /// True if the call expression has some floating-point features. unsigned HasFPFeatures : 1; /// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough /// here. ([implimits] Direct and indirect base classes [16384]). 
unsigned BasePathSize; }; class BinaryOperatorBitfields { friend class BinaryOperator; unsigned : NumExprBits; unsigned Opc : 6; /// This is only meaningful for operations on floating point /// types when additional values need to be in trailing storage. /// It is 0 otherwise. unsigned HasFPFeatures : 1; SourceLocation OpLoc; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. This is a temporary marker used by CodeGen. unsigned HadArrayRangeDesignator : 1; }; class ParenListExprBitfields { friend class ASTStmtReader; friend class ParenListExpr; unsigned : NumExprBits; /// The number of expressions in the paren list. unsigned NumExprs; }; class GenericSelectionExprBitfields { friend class ASTStmtReader; friend class GenericSelectionExpr; unsigned : NumExprBits; /// The location of the "_Generic". SourceLocation GenericLoc; }; class PseudoObjectExprBitfields { friend class ASTStmtReader; // deserialization friend class PseudoObjectExpr; unsigned : NumExprBits; // These don't need to be particularly wide, because they're // strictly limited by the forms of expressions we permit. unsigned NumSubExprs : 8; unsigned ResultIndex : 32 - 8 - NumExprBits; }; class SourceLocExprBitfields { friend class ASTStmtReader; friend class SourceLocExpr; unsigned : NumExprBits; /// The kind of source location builtin represented by the SourceLocExpr. /// Ex. __builtin_LINE, __builtin_FUNCTION, ect. unsigned Kind : 2; }; class StmtExprBitfields { friend class ASTStmtReader; friend class StmtExpr; unsigned : NumExprBits; /// The number of levels of template parameters enclosing this statement /// expression. Used to determine if a statement expression remains /// dependent after instantiation. 
unsigned TemplateDepth; }; //===--- C++ Expression bitfields classes ---===// class CXXOperatorCallExprBitfields { friend class ASTStmtReader; friend class CXXOperatorCallExpr; unsigned : NumCallExprBits; /// The kind of this overloaded operator. One of the enumerator /// value of OverloadedOperatorKind. unsigned OperatorKind : 6; }; class CXXRewrittenBinaryOperatorBitfields { friend class ASTStmtReader; friend class CXXRewrittenBinaryOperator; unsigned : NumCallExprBits; unsigned IsReversed : 1; }; class CXXBoolLiteralExprBitfields { friend class CXXBoolLiteralExpr; unsigned : NumExprBits; /// The value of the boolean literal. unsigned Value : 1; /// The location of the boolean literal. SourceLocation Loc; }; class CXXNullPtrLiteralExprBitfields { friend class CXXNullPtrLiteralExpr; unsigned : NumExprBits; /// The location of the null pointer literal. SourceLocation Loc; }; class CXXThisExprBitfields { friend class CXXThisExpr; unsigned : NumExprBits; /// Whether this is an implicit "this". unsigned IsImplicit : 1; /// The location of the "this". SourceLocation Loc; }; class CXXThrowExprBitfields { friend class ASTStmtReader; friend class CXXThrowExpr; unsigned : NumExprBits; /// Whether the thrown variable (if any) is in scope. unsigned IsThrownVariableInScope : 1; /// The location of the "throw". SourceLocation ThrowLoc; }; class CXXDefaultArgExprBitfields { friend class ASTStmtReader; friend class CXXDefaultArgExpr; unsigned : NumExprBits; /// The location where the default argument expression was used. SourceLocation Loc; }; class CXXDefaultInitExprBitfields { friend class ASTStmtReader; friend class CXXDefaultInitExpr; unsigned : NumExprBits; /// The location where the default initializer expression was used. 
SourceLocation Loc; }; class CXXScalarValueInitExprBitfields { friend class ASTStmtReader; friend class CXXScalarValueInitExpr; unsigned : NumExprBits; SourceLocation RParenLoc; }; class CXXNewExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class CXXNewExpr; unsigned : NumExprBits; /// Was the usage ::new, i.e. is the global new to be used? unsigned IsGlobalNew : 1; /// Do we allocate an array? If so, the first trailing "Stmt *" is the /// size expression. unsigned IsArray : 1; /// Should the alignment be passed to the allocation function? unsigned ShouldPassAlignment : 1; /// If this is an array allocation, does the usual deallocation /// function for the allocated type want to know the allocated size? unsigned UsualArrayDeleteWantsSize : 1; /// What kind of initializer do we have? Could be none, parens, or braces. /// In storage, we distinguish between "none, and no initializer expr", and /// "none, but an implicit initializer expr". unsigned StoredInitializationStyle : 2; /// True if the allocated type was expressed as a parenthesized type-id. unsigned IsParenTypeId : 1; /// The number of placement new arguments. unsigned NumPlacementArgs; }; class CXXDeleteExprBitfields { friend class ASTStmtReader; friend class CXXDeleteExpr; unsigned : NumExprBits; /// Is this a forced global delete, i.e. "::delete"? unsigned GlobalDelete : 1; /// Is this the array form of delete, i.e. "delete[]"? unsigned ArrayForm : 1; /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is /// applied to pointer-to-array type (ArrayFormAsWritten will be false /// while ArrayForm will be true). unsigned ArrayFormAsWritten : 1; /// Does the usual deallocation function for the element type require /// a size_t argument? unsigned UsualArrayDeleteWantsSize : 1; /// Location of the expression. 
SourceLocation Loc; }; class TypeTraitExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class TypeTraitExpr; unsigned : NumExprBits; /// The kind of type trait, which is a value of a TypeTrait enumerator. unsigned Kind : 8; /// If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// The number of arguments to this type trait. According to [implimits] /// 8 bits would be enough, but we require (and test for) at least 16 bits /// to mirror FunctionType. unsigned NumArgs; }; class DependentScopeDeclRefExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class DependentScopeDeclRefExpr; unsigned : NumExprBits; /// Whether the name includes info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; }; class CXXConstructExprBitfields { friend class ASTStmtReader; friend class CXXConstructExpr; unsigned : NumExprBits; unsigned Elidable : 1; unsigned HadMultipleCandidates : 1; unsigned ListInitialization : 1; unsigned StdInitListInitialization : 1; unsigned ZeroInitialization : 1; unsigned ConstructionKind : 3; SourceLocation Loc; }; class ExprWithCleanupsBitfields { friend class ASTStmtReader; // deserialization friend class ExprWithCleanups; unsigned : NumExprBits; // When false, it must not have side effects. unsigned CleanupsHaveSideEffects : 1; unsigned NumObjects : 32 - 1 - NumExprBits; }; class CXXUnresolvedConstructExprBitfields { friend class ASTStmtReader; friend class CXXUnresolvedConstructExpr; unsigned : NumExprBits; /// The number of arguments used to construct the type. unsigned NumArgs; }; class CXXDependentScopeMemberExprBitfields { friend class ASTStmtReader; friend class CXXDependentScopeMemberExpr; unsigned : NumExprBits; /// Whether this member expression used the '->' operator or /// the '.' operator. 
unsigned IsArrow : 1; /// Whether this member expression has info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; /// See getFirstQualifierFoundInScope() and the comment listing /// the trailing objects. unsigned HasFirstQualifierFoundInScope : 1; /// The location of the '->' or '.' operator. SourceLocation OperatorLoc; }; class OverloadExprBitfields { friend class ASTStmtReader; friend class OverloadExpr; unsigned : NumExprBits; /// Whether the name includes info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; /// Padding used by the derived classes to store various bits. If you /// need to add some data here, shrink this padding and add your data /// above. NumOverloadExprBits also needs to be updated. unsigned : 32 - NumExprBits - 1; /// The number of results. unsigned NumResults; }; enum { NumOverloadExprBits = NumExprBits + 1 }; class UnresolvedLookupExprBitfields { friend class ASTStmtReader; friend class UnresolvedLookupExpr; unsigned : NumOverloadExprBits; /// True if these lookup results should be extended by /// argument-dependent lookup if this is the operand of a function call. unsigned RequiresADL : 1; /// True if these lookup results are overloaded. This is pretty trivially /// rederivable if we urgently need to kill this field. unsigned Overloaded : 1; }; static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4, "UnresolvedLookupExprBitfields must be <= than 4 bytes to" "avoid trashing OverloadExprBitfields::NumResults!"); class UnresolvedMemberExprBitfields { friend class ASTStmtReader; friend class UnresolvedMemberExpr; unsigned : NumOverloadExprBits; /// Whether this member expression used the '->' operator or /// the '.' operator. unsigned IsArrow : 1; /// Whether the lookup results contain an unresolved using declaration. 
unsigned HasUnresolvedUsing : 1; }; static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4, "UnresolvedMemberExprBitfields must be <= than 4 bytes to" "avoid trashing OverloadExprBitfields::NumResults!"); class CXXNoexceptExprBitfields { friend class ASTStmtReader; friend class CXXNoexceptExpr; unsigned : NumExprBits; unsigned Value : 1; }; class SubstNonTypeTemplateParmExprBitfields { friend class ASTStmtReader; friend class SubstNonTypeTemplateParmExpr; unsigned : NumExprBits; /// The location of the non-type template parameter reference. SourceLocation NameLoc; }; class LambdaExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class LambdaExpr; unsigned : NumExprBits; /// The default capture kind, which is a value of type /// LambdaCaptureDefault. unsigned CaptureDefault : 2; /// Whether this lambda had an explicit parameter list vs. an /// implicit (and empty) parameter list. unsigned ExplicitParams : 1; /// Whether this lambda had the result type explicitly specified. unsigned ExplicitResultType : 1; /// The number of captures. unsigned NumCaptures : 16; }; class RequiresExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class RequiresExpr; unsigned : NumExprBits; unsigned IsSatisfied : 1; SourceLocation RequiresKWLoc; }; //===--- C++ Coroutines TS bitfields classes ---===// class CoawaitExprBitfields { friend class CoawaitExpr; unsigned : NumExprBits; unsigned IsImplicit : 1; }; //===--- Obj-C Expression bitfields classes ---===// class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; //===--- Clang Extensions bitfields classes ---===// class OpaqueValueExprBitfields { friend class ASTStmtReader; friend class OpaqueValueExpr; unsigned : NumExprBits; /// The OVE is a unique semantic reference to its source expression if this /// bit is set to true. 
unsigned IsUnique : 1;

SourceLocation Loc;
};

/// The number of bits used by BoundsExprBitfields::Kind.
enum { NumBoundsExprKindBits = 3 };

class BoundsExprBitfields {
  friend class BoundsExpr;

  unsigned : NumExprBits;

  unsigned Kind : NumBoundsExprKindBits;
  unsigned IsCompilerGenerated : 1;
};

/// The number of bits available to InteropTypeExpr's bitfields.
enum { NumInteropTypeExprKindBits = 1 };

class InteropTypeExprBitfields {
  friend class InteropTypeExpr;

  unsigned : NumExprBits;

  unsigned IsCompilerGenerated : 1;
};

// Anonymous union: every Stmt stores exactly one of these bitfield structs,
// discriminated by the statement class kept in StmtBits.sClass.
union {
  // Same order as in StmtNodes.td.

  // Statements
  StmtBitfields StmtBits;
  NullStmtBitfields NullStmtBits;
  CompoundStmtBitfields CompoundStmtBits;
  LabelStmtBitfields LabelStmtBits;
  AttributedStmtBitfields AttributedStmtBits;
  IfStmtBitfields IfStmtBits;
  SwitchStmtBitfields SwitchStmtBits;
  WhileStmtBitfields WhileStmtBits;
  DoStmtBitfields DoStmtBits;
  ForStmtBitfields ForStmtBits;
  GotoStmtBitfields GotoStmtBits;
  ContinueStmtBitfields ContinueStmtBits;
  BreakStmtBitfields BreakStmtBits;
  ReturnStmtBitfields ReturnStmtBits;
  SwitchCaseBitfields SwitchCaseBits;

  // Expressions
  ExprBitfields ExprBits;
  ConstantExprBitfields ConstantExprBits;
  PredefinedExprBitfields PredefinedExprBits;
  DeclRefExprBitfields DeclRefExprBits;
  FloatingLiteralBitfields FloatingLiteralBits;
  StringLiteralBitfields StringLiteralBits;
  CharacterLiteralBitfields CharacterLiteralBits;
  UnaryOperatorBitfields UnaryOperatorBits;
  UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
  ArrayOrMatrixSubscriptExprBitfields ArrayOrMatrixSubscriptExprBits;
  ArraySubscriptExprBitfields ArraySubscriptExprBits;
  CallExprBitfields CallExprBits;
  MemberExprBitfields MemberExprBits;
  CastExprBitfields CastExprBits;
  BinaryOperatorBitfields BinaryOperatorBits;
  InitListExprBitfields InitListExprBits;
  ParenListExprBitfields ParenListExprBits;
  GenericSelectionExprBitfields GenericSelectionExprBits;
  PseudoObjectExprBitfields PseudoObjectExprBits;
  SourceLocExprBitfields SourceLocExprBits;

  // GNU Extensions.
  StmtExprBitfields StmtExprBits;

  // C++ Expressions
  CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
  CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits;
  CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
  CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
  CXXThisExprBitfields CXXThisExprBits;
  CXXThrowExprBitfields CXXThrowExprBits;
  CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
  CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
  CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
  CXXNewExprBitfields CXXNewExprBits;
  CXXDeleteExprBitfields CXXDeleteExprBits;
  TypeTraitExprBitfields TypeTraitExprBits;
  DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
  CXXConstructExprBitfields CXXConstructExprBits;
  ExprWithCleanupsBitfields ExprWithCleanupsBits;
  CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
  CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
  OverloadExprBitfields OverloadExprBits;
  UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
  UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
  CXXNoexceptExprBitfields CXXNoexceptExprBits;
  SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
  LambdaExprBitfields LambdaExprBits;
  RequiresExprBitfields RequiresExprBits;

  // C++ Coroutines TS expressions
  CoawaitExprBitfields CoawaitBits;

  BoundsExprBitfields BoundsExprBits;
  InteropTypeExprBitfields InteropTypeExprBits;

  // Obj-C Expressions
  ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;

  // Clang Extensions
  OpaqueValueExprBitfields OpaqueValueExprBits;
};

public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void *operator new(size_t bytes, void *mem) noexcept { return mem; } void operator delete(void *, const ASTContext &, unsigned) noexcept {} void operator delete(void *, const ASTContext *, unsigned) noexcept {} void operator delete(void *, size_t) noexcept {} void operator delete(void *, void *) noexcept {} public: /// A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell {}; /// The likelihood of a branch being taken. enum Likelihood { LH_Unlikely = -1, ///< Branch has the [[unlikely]] attribute. LH_None, ///< No attribute set or branches of the IfStmt have ///< the same attribute. LH_Likely ///< Branch has the [[likely]] attribute. }; protected: /// Iterator for iterating over Stmt * arrays that contain only T *. /// /// This is needed because AST nodes use Stmt* arrays to store /// references to children (to be compatible with StmtIterator). template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *> struct CastIterator : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *, std::random_access_iterator_tag, TPtr> { using Base = typename CastIterator::iterator_adaptor_base; CastIterator() : Base(nullptr) {} CastIterator(StmtPtr *I) : Base(I) {} typename Base::value_type operator*() const { return cast_or_null<T>(*this->I); } }; /// Const iterator for iterating over Stmt * arrays that contain only T *. template <typename T> using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>; using ExprIterator = CastIterator<Expr>; using ConstExprIterator = ConstCastIterator<Expr>; private: /// Whether statistic collection is enabled. static bool StatisticsEnabled; protected: /// Construct an empty statement. 
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {} public: Stmt() = delete; Stmt(const Stmt &) = delete; Stmt(Stmt &&) = delete; Stmt &operator=(const Stmt &) = delete; Stmt &operator=(Stmt &&) = delete; Stmt(StmtClass SC) { static_assert(sizeof(*this) <= 8, "changing bitfields changed sizeof(Stmt)"); static_assert(sizeof(*this) % alignof(void *) == 0, "Insufficient alignment!"); StmtBits.sClass = SC; if (StatisticsEnabled) Stmt::addStmtClass(SC); } StmtClass getStmtClass() const { return static_cast<StmtClass>(StmtBits.sClass); } const char *getStmtClassName() const; CheckedScopeSpecifier getCheckedScopeSpecifier() const { return static_cast<CheckedScopeSpecifier>(StmtBits.CSS); } void setCheckedScopeSpecifier(CheckedScopeSpecifier CSS) { StmtBits.CSS = CSS; } /// SourceLocation tokens are not useful in isolation - they are low level /// value objects created/interpreted by SourceManager. We assume AST /// clients will have a pointer to the respective SourceManager. SourceRange getSourceRange() const LLVM_READONLY; SourceLocation getBeginLoc() const LLVM_READONLY; SourceLocation getEndLoc() const LLVM_READONLY; // global temp stats (until we have a per-module visitor) static void addStmtClass(const StmtClass s); static void EnableStatistics(); static void PrintStats(); /// \returns the likelihood of a set of attributes. static Likelihood getLikelihood(ArrayRef<const Attr *> Attrs); /// \returns the likelihood of a statement. static Likelihood getLikelihood(const Stmt *S); /// \returns the likelihood attribute of a statement. static const Attr *getLikelihoodAttr(const Stmt *S); /// \returns the likelihood of the 'then' branch of an 'if' statement. The /// 'else' branch is required to determine whether both branches specify the /// same likelihood, which affects the result. static Likelihood getLikelihood(const Stmt *Then, const Stmt *Else); /// \returns whether the likelihood of the branches of an if statement are /// conflicting. 
When the first element is \c true there's a conflict and /// the Attr's are the conflicting attributes of the Then and Else Stmt. static std::tuple<bool, const Attr *, const Attr *> determineLikelihoodConflict(const Stmt *Then, const Stmt *Else); /// Dumps the specified AST fragment and all subtrees to /// \c llvm::errs(). void dump() const; void dump(raw_ostream &OS, const ASTContext &Context) const; /// \return Unique reproducible object identifier int64_t getID(const ASTContext &Context) const; /// dumpColor - same as dump(), but forces color highlighting. void dumpColor() const; /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST /// back to its original source language syntax. void dumpPretty(const ASTContext &Context) const; void printPretty(raw_ostream &OS, PrinterHelper *Helper, const PrintingPolicy &Policy, unsigned Indentation = 0, StringRef NewlineSymbol = "\n", const ASTContext *Context = nullptr) const; /// Pretty-prints in JSON format. void printJson(raw_ostream &Out, PrinterHelper *Helper, const PrintingPolicy &Policy, bool AddQuotes) const; /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only /// works on systems with GraphViz (Mac OS X) or dot+gv installed. void viewAST() const; /// Skip no-op (attributed, compound) container stmts and skip captured /// stmt at the top, if \a IgnoreCaptured is true. Stmt *IgnoreContainers(bool IgnoreCaptured = false); const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const { return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured); } const Stmt *stripLabelLikeStatements() const; Stmt *stripLabelLikeStatements() { return const_cast<Stmt*>( const_cast<const Stmt*>(this)->stripLabelLikeStatements()); } /// Child Iterators: All subclasses must implement 'children' /// to permit easy iteration over the substatements/subexpessions of an /// AST node. This permits easy iteration over all nodes in the AST. 
using child_iterator = StmtIterator; using const_child_iterator = ConstStmtIterator; using child_range = llvm::iterator_range<child_iterator>; using const_child_range = llvm::iterator_range<const_child_iterator>; child_range children(); const_child_range children() const { auto Children = const_cast<Stmt *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_iterator child_begin() { return children().begin(); } child_iterator child_end() { return children().end(); } const_child_iterator child_begin() const { return children().begin(); } const_child_iterator child_end() const { return children().end(); } /// Produce a unique representation of the given statement. /// /// \param ID once the profiling operation is complete, will contain /// the unique representation of the given statement. /// /// \param Context the AST context in which the statement resides /// /// \param Canonical whether the profile should be based on the canonical /// representation of this statement (e.g., where non-type template /// parameters are identified by index/level rather than their /// declaration pointers) or the exact representation of the statement as /// written in the source. void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, bool Canonical) const; /// Calculate a unique representation for a statement that is /// stable across compiler invocations. /// /// \param ID profile information will be stored in ID. /// /// \param Hash an ODRHash object which will be called where pointers would /// have been used in the Profile function. void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const; }; /// Checked C /// A Bitfield to indicate the position of a statement within a bundle. class BundlePositionBitfield { friend class DeclStmt; friend class ValueStmt; unsigned char FirstStmt : 1; unsigned char LastStmt : 1; }; /// DeclStmt - Adaptor class for mixing declarations with statements and /// expressions. 
/// For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;
  // Position of this statement within a bundled block
  // (see BundlePositionBitfield).
  BundlePositionBitfield PosInBundle;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {
    PosInBundle.FirstStmt = 0;
    PosInBundle.LastStmt = 0;
  }

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {
    PosInBundle.FirstStmt = 0;
    PosInBundle.LastStmt = 0;
  }

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  // Checked C: mark/query position within a bundled block.
  void markFirstStmtOfBundledBlk() { PosInBundle.FirstStmt = 1; }
  bool isFirstStmtOfBundledBlk() const { return PosInBundle.FirstStmt == 1; }
  void markLastStmtOfBundledBlk() { PosInBundle.LastStmt = 1; }
  bool isLastStmtOfBundledBlk() const { return PosInBundle.LastStmt == 1; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  const_child_range children() const {
    auto Children = const_cast<DeclStmt *>(this)->children();
    return const_child_range(Children);
  }

  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }

  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }

  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }

  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};

/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
private:
  // Optional where clause; nullptr when absent.
  WhereClause *WClause;

public:
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass), WClause(nullptr) {
    NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
    setSemiLoc(L);
  }

  /// Build an empty null statement.
explicit NullStmt(EmptyShell Empty)
    : Stmt(NullStmtClass, Empty), WClause(nullptr) {}

// The semicolon location is stored in the shared NullStmtBits.
SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }

bool hasLeadingEmptyMacro() const {
  return NullStmtBits.HasLeadingEmptyMacro;
}

SourceLocation getBeginLoc() const { return getSemiLoc(); }
SourceLocation getEndLoc() const { return getSemiLoc(); }

static bool classof(const Stmt *T) {
  return T->getStmtClass() == NullStmtClass;
}

// A null statement has no children.
child_range children() {
  return child_range(child_iterator(), child_iterator());
}

const_child_range children() const {
  return const_child_range(const_child_iterator(), const_child_iterator());
}

void setWhereClause(WhereClause *WC) { WClause = WC; }
WhereClause *getWhereClause() const { return WClause; }
};

// The kind of Checked C checking to do in a scope.
enum class CheckedScopeKind {
  // No checking.
  Unchecked = 0x1,
  /// Check properties for bounds safety.
  Bounds = 0x2,
  /// Check properties for bounds safety and preventing type confusion.
  BoundsAndTypes = 0x4
};

/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final
    : public Stmt,
      private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The location of the closing "}". LBraceLoc is stored in
  /// CompoundStmtBits.
  SourceLocation RBraceLoc;

  // Written checked scope specifier.
  unsigned WrittenCSS : 2;
  // Inferred checked scope specifier, using information from parent
  // scope also.
  unsigned CSS : 2;
  // Checked scope keyword (_Checked / _Unchecked) location.
  SourceLocation CSSLoc;
  // Checked scope modifier (_Bounds_only) location.
  SourceLocation CSMLoc;
  // Bundled keyword (_Bundled) location.
  SourceLocation BNDLoc;

  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB,
               CheckedScopeSpecifier WrittenCSS = CSS_None,
               CheckedScopeSpecifier CSS = CSS_Unchecked,
               SourceLocation CSSLoc = SourceLocation(),
               SourceLocation CSMLoc = SourceLocation(),
               SourceLocation BNDLoc = SourceLocation());

  explicit CompoundStmt(EmptyShell Empty)
      : Stmt(CompoundStmtClass, Empty), WrittenCSS(CSS_None),
        CSS(CSS_Unchecked), CSSLoc(), CSMLoc(), BNDLoc() {}

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt*> Stmts,
                              SourceLocation LB, SourceLocation RB,
                              CheckedScopeSpecifier WrittenCSS = CSS_None,
                              CheckedScopeSpecifier CSS = CSS_Unchecked,
                              SourceLocation CSSLoc = SourceLocation(),
                              SourceLocation CSMLoc = SourceLocation(),
                              SourceLocation BNDLoc = SourceLocation());

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), RBraceLoc(Loc), WrittenCSS(CSS_None),
        CSS(CSS_Unchecked), CSSLoc(Loc), CSMLoc(Loc),
        BNDLoc(SourceLocation()) {
    CompoundStmtBits.NumStmts = 0;
    CompoundStmtBits.LBraceLoc = Loc;
  }

  // Build an empty compound statement.
static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
unsigned size() const { return CompoundStmtBits.NumStmts; }

// Checked C: accessors for the written vs. inferred scope specifiers.
CheckedScopeSpecifier getWrittenCheckedSpecifier() const {
  return (CheckedScopeSpecifier) WrittenCSS;
}

CheckedScopeSpecifier getCheckedSpecifier() const {
  return (CheckedScopeSpecifier) CSS;
}

void setWrittenCheckedSpecifiers(CheckedScopeSpecifier NS) {
  WrittenCSS = NS;
}

void setCheckedSpecifiers(CheckedScopeSpecifier NS) { CSS = NS; }

bool isCheckedScope() const { return CSS != CSS_Unchecked; }

bool isBundledStmt() const { return BNDLoc.isValid(); }

// Body statements are stored as trailing objects immediately after this
// object.
using body_iterator = Stmt **;
using body_range = llvm::iterator_range<body_iterator>;

body_range body() { return body_range(body_begin(), body_end()); }
body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
body_iterator body_end() { return body_begin() + size(); }
Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

Stmt *body_back() {
  return !body_empty() ? body_begin()[size() - 1] : nullptr;
}

using const_body_iterator = Stmt *const *;
using body_const_range = llvm::iterator_range<const_body_iterator>;

body_const_range body() const {
  return body_const_range(body_begin(), body_end());
}

const_body_iterator body_begin() const {
  return getTrailingObjects<Stmt *>();
}

const_body_iterator body_end() const { return body_begin() + size(); }

const Stmt *body_front() const {
  return !body_empty() ? body_begin()[0] : nullptr;
}

const Stmt *body_back() const {
  return !body_empty() ? body_begin()[size() - 1] : nullptr;
}

using reverse_body_iterator = std::reverse_iterator<body_iterator>;

reverse_body_iterator body_rbegin() {
  return reverse_body_iterator(body_end());
}

reverse_body_iterator body_rend() {
  return reverse_body_iterator(body_begin());
}

using const_reverse_body_iterator =
    std::reverse_iterator<const_body_iterator>;

const_reverse_body_iterator body_rbegin() const {
  return const_reverse_body_iterator(body_end());
}

const_reverse_body_iterator body_rend() const {
  return const_reverse_body_iterator(body_begin());
}

// Get the Stmt that StmtExpr would consider to be the result of this
// compound statement. This is used by StmtExpr to properly emulate the GCC
// compound expression extension, which ignores trailing NullStmts when
// getting the result of the expression.
// i.e. ({ 5;;; })
//        ^^ ignored
// If we don't find something that isn't a NullStmt, just return the last
// Stmt.
Stmt *getStmtExprResult() {
  for (auto *B : llvm::reverse(body())) {
    if (!isa<NullStmt>(B))
      return B;
  }
  return body_back();
}

const Stmt *getStmtExprResult() const {
  return const_cast<CompoundStmt *>(this)->getStmtExprResult();
}

SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getEndLoc() const { return RBraceLoc; }

SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getRBracLoc() const { return RBraceLoc; }

SourceLocation getCheckedSpecifierLoc() const { return CSSLoc; }
SourceLocation getSpecifierModifierLoc() const { return CSMLoc; }
SourceLocation getBundledSpecifierLoc() const { return BNDLoc; }

static bool classof(const Stmt *T) {
  return T->getStmtClass() == CompoundStmtClass;
}

// Iterators
child_range children() { return child_range(body_begin(), body_end()); }

const_child_range children() const {
  return const_child_range(body_begin(), body_end());
}
};

// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
  /// The location
/// of the ":".
SourceLocation ColonLoc;

// The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
// SourceLocation KeywordLoc;

/// A pointer to the following CaseStmt or DefaultStmt class,
/// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;

SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
    : Stmt(SC), ColonLoc(ColonLoc) {
  setKeywordLoc(KWLoc);
}

SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }

// Defined inline after CaseStmt/DefaultStmt are complete.
inline Stmt *getSubStmt();
const Stmt *getSubStmt() const {
  return const_cast<SwitchCase *>(this)->getSubStmt();
}

SourceLocation getBeginLoc() const { return getKeywordLoc(); }
inline SourceLocation getEndLoc() const LLVM_READONLY;

static bool classof(const Stmt *T) {
  return T->getStmtClass() == CaseStmtClass ||
         T->getStmtClass() == DefaultStmtClass;
}
};

/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which
  // optional. Note that it would be more convenient to put the optional
  // trailing objects at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allow ranges in cases statement of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case
  //   statement with a range. Present if and only if caseStmtIsGNURange()
  //   is true.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  // Offsets into the trailing "Stmt *" array; the RHS slot shifts the
  // substatement slot when present.
  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);

  /// Build an empty case statement.
  static CaseStmt *CreateEmpty(const ASTContext &Ctx,
                               bool CaseStmtIsGNURange);

  /// True if this case statement is of the form case LHS ... RHS, which
  /// is a GNU extension. In this case the RHS can be obtained with getRHS()
  /// and the location of the ellipsis can be obtained with getEllipsisLoc().
bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; } SourceLocation getCaseLoc() const { return getKeywordLoc(); } void setCaseLoc(SourceLocation L) { setKeywordLoc(L); } /// Get the location of the ... in a case statement of the form LHS ... RHS. SourceLocation getEllipsisLoc() const { return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>() : SourceLocation(); } /// Set the location of the ... in a case statement of the form LHS ... RHS. /// Assert that this case statement is of this form. void setEllipsisLoc(SourceLocation L) { assert( caseStmtIsGNURange() && "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!"); *getTrailingObjects<SourceLocation>() = L; } Expr *getLHS() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]); } const Expr *getLHS() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]); } void setLHS(Expr *Val) { getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val); } Expr *getRHS() { return caseStmtIsGNURange() ? reinterpret_cast<Expr *>( getTrailingObjects<Stmt *>()[rhsOffset()]) : nullptr; } const Expr *getRHS() const { return caseStmtIsGNURange() ? reinterpret_cast<Expr *>( getTrailingObjects<Stmt *>()[rhsOffset()]) : nullptr; } void setRHS(Expr *Val) { assert(caseStmtIsGNURange() && "setRHS but this is not a case stmt of the form LHS ... RHS!"); getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val); } Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; } const Stmt *getSubStmt() const { return getTrailingObjects<Stmt *>()[subStmtOffset()]; } void setSubStmt(Stmt *S) { getTrailingObjects<Stmt *>()[subStmtOffset()] = S; } SourceLocation getBeginLoc() const { return getKeywordLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { // Handle deeply nested case statements with iteration instead of recursion. 
    // Walk to the innermost nested case ("case 1: case 2: ...") iteratively;
    // long case chains would otherwise overflow the stack via recursion.
    const CaseStmt *CS = this;
    while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators over the trailing "Stmt *" objects (LHS, optional RHS, substmt).
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

/// DefaultStmt - Represents the "default:" label of a switch statement,
/// together with its substatement.
class DefaultStmt : public SwitchCase {
  // The statement following the "default:" label. Always present.
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }
};

// Out-of-line SwitchCase accessors: dispatch to the concrete subclass, since
// CaseStmt and DefaultStmt store their substatements differently.
SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getEndLoc();
  else if (const auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

Stmt *SwitchCase::getSubStmt() {
  if (auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getSubStmt();
  else if (auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
  using Stmt::Stmt;

private:
  // NOTE(review): these two members extend the upstream ValueStmt in this
  // fork. WClause is an optional attached where-clause (nullptr when absent);
  // PosInBundle marks whether this statement opens and/or closes a bundled
  // block — semantics of "bundled block" are defined elsewhere; confirm
  // against the WhereClause/Bundle declarations.
  WhereClause *WClause = nullptr;
  BundlePositionBitfield PosInBundle;

public:
  /// Build a value statement. Both bundle-position flags start cleared.
  ValueStmt(StmtClass SC) : Stmt(SC) {
    PosInBundle.FirstStmt = 0;
    PosInBundle.LastStmt = 0;
  }

  /// Build an empty value statement.
  explicit ValueStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {
    PosInBundle.FirstStmt = 0;
    PosInBundle.LastStmt = 0;
  }

  const Expr *getExprStmt() const;
  Expr *getExprStmt() {
    // Reuse the const overload; casting away const here is safe because
    // *this is non-const.
    const ValueStmt *ConstThis = this;
    return const_cast<Expr*>(ConstThis->getExprStmt());
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() >= firstValueStmtConstant &&
           T->getStmtClass() <= lastValueStmtConstant;
  }

  void setWhereClause(WhereClause *WC) { WClause = WC; }
  WhereClause *getWhereClause() const { return WClause; }

  // Bundle-position flags: set once, never cleared through this interface.
  void markFirstStmtOfBundledBlk() { PosInBundle.FirstStmt = 1; }
  bool isFirstStmtOfBundledBlk() const { return PosInBundle.FirstStmt == 1; }
  void markLastStmtOfBundledBlk() { PosInBundle.LastStmt = 1; }
  bool isLastStmtOfBundledBlk() const { return PosInBundle.LastStmt == 1; }
};

/// LabelStmt - Represents a label, which has a substatement. For example:
///    foo: return;
class LabelStmt : public ValueStmt {
  LabelDecl *TheDecl; // The declaration this label refers to.
  Stmt *SubStmt;      // The labeled statement. Always present.

public:
  /// Build a label statement.
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    setIdentLoc(IL);
  }

  /// Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}

  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }

  const char *getName() const;

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};

/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
///
/// The attributes are stored as trailing objects; the count and the location
/// of the attribute list live in AttributedStmtBits.
class AttributedStmt final
    : public ValueStmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  Stmt *SubStmt; // The statement the attributes apply to. Always present.

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : ValueStmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    // Deserialization will fill these in; null them out defensively.
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};

/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at then end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact a "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //   Always present.
  //
  // * A "Stmt *" for the else statement.
  //   Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //   Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 }; // Condition + then statement.

  SourceLocation LParenLoc;
  SourceLocation RParenLoc;

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  // Offsets into the trailing "Stmt *" array; each optional slot shifts the
  // ones after it.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr,
         Stmt *Init, VarDecl *Var, Expr *Cond, SourceLocation LParenLoc,
         SourceLocation RParenLoc, Stmt *Then, SourceLocation EL, Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        SourceLocation LPL, SourceLocation RPL, Stmt *Then,
                        SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  /// If this is an 'if constexpr', determine which substatement will be taken.
  /// Otherwise, or if the condition is value-dependent, returns None.
  Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }

  // Iterators over subexpressions.  The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 }; // Condition + body.

  SourceLocation LParenLoc;
  SourceLocation RParenLoc;

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Offsets into the trailing "Stmt *" array, mirroring IfStmt's layout.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond,
             SourceLocation LParenLoc, SourceLocation RParenLoc);

  /// Build a empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond, SourceLocation LParenLoc,
                            SourceLocation RParenLoc);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  ///   case 0: break;
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }

  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  // Prepends SC to the singly linked case list; cases therefore end up in
  // reverse source order.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() {
    SwitchStmtBits.AllEnumCasesCovered = true;
  }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};

/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 }; // Condition + body.

  SourceLocation LParenLoc, RParenLoc;

  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL, SourceLocation LParenLoc,
            SourceLocation RParenLoc);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL,
                           SourceLocation LParenLoc, SourceLocation RParenLoc);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  // Fixed sub-statement slots: the body and the controlling condition.
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];

  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setCond(Cond);
    setBody(Body);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }

  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};

/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  // Sub-statement slots; CONDVAR holds the faux DeclStmt for a condition
  // variable declared in the condition, if any.
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc()  { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc()  const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};

/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;        // The label being jumped to.
  SourceLocation LabelLoc; // Location of the label name in the goto.

public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    setGotoLoc(GL);
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators: a direct goto has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc; // Location of the '*' before the target expression.
  Stmt *Target;           // The computed jump target. In fact an "Expr *".

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc,
                   Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setTarget(target);
    setGotoLoc(gotoLoc);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&Target, &Target + 1); }

  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};

/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    setContinueLoc(CL);
  }

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }

  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators: continue has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
    setBreakLoc(BL);
  }

  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }

  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators: break has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;

  /// The return expression.
  Stmt *RetExpr;

  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // A bare "return;" ends at the keyword itself.
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;

  SourceLocation AsmLoc;

  /// True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  // Flat array of operand expressions: outputs first, then inputs
  // (see the iterator accessors in the public interface below).
  Stmt **Exprs = nullptr;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}

public:
  /// Build an empty inline-assembly statement.
// (continuation of the AsmStmt public interface)
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
  SourceLocation getEndLoc() const LLVM_READONLY { return {}; }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand. All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint. Unlike output
  /// constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators.
  using inputs_iterator = ExprIterator;
  using const_inputs_iterator = ConstExprIterator;
  using inputs_range = llvm::iterator_range<inputs_iterator>;
  using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;

  // Inputs are stored in Exprs immediately after the NumOutputs outputs.
  inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; }
  inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; }
  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; }
  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }
  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators.
  using outputs_iterator = ExprIterator;
  using const_outputs_iterator = ConstExprIterator;
  using outputs_range = llvm::iterator_range<outputs_iterator>;
  using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;

  // Outputs occupy the first NumOutputs slots of Exprs.
  outputs_iterator begin_outputs() { return &Exprs[0]; }
  outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; }
  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const { return &Exprs[0]; }
  const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; }
  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};

/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation RParenLoc;
  StringLiteral *AsmStr;

  // FIXME: If we wanted to, we could allocate all of these in one big array.
// (continuation of GCCAsmStmt: per-operand side tables, sized by the counts
// stored in the AsmStmt base class)
  StringLiteral **Constraints = nullptr;
  StringLiteral **Clobbers = nullptr;
  IdentifierInfo **Names = nullptr;
  unsigned NumLabels = 0;

public:
  GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
             bool isvolatile, unsigned numoutputs, unsigned numinputs,
             IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
             StringLiteral *asmstr, unsigned numclobbers,
             StringLiteral **clobbers, unsigned numlabels,
             SourceLocation rparenloc);

  /// Build an empty inline-assembly statement.
  explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  //===--- Asm String Analysis ---===//

  const StringLiteral *getAsmString() const { return AsmStr; }
  StringLiteral *getAsmString() { return AsmStr; }
  void setAsmString(StringLiteral *E) { AsmStr = E; }

  /// AsmStringPiece - this is part of a decomposed asm string specification
  /// (for use with the AnalyzeAsmString function below). An asm string is
  /// considered to be a concatenation of these parts.
  class AsmStringPiece {
  public:
    enum Kind {
      String,  // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
      Operand  // Operand reference, with optional modifier %c4.
    };

  private:
    Kind MyKind;
    std::string Str;
    unsigned OperandNo;

    // Source range for operand references.
    CharSourceRange Range;

  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
        : MyKind(Operand), Str(S), OperandNo(OpNo),
          Range(CharSourceRange::getCharRange(Begin, End)) {}

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const { return Str; }

    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present. This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };

  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces. If the asm string is erroneous, emit errors and return
  /// true, otherwise return false. This handles canonicalization and
  /// translation of strings from GCC syntax to LLVM IR syntax, and handles
  /// flattening of named references like %[foo] to Operand AsmStringPiece's.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }

  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();
    return {};
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Input entries in the side tables follow the NumOutputs output entries.
  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();
    return {};
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Labels ---===//

  // Labels are only present for "asm goto" statements.
  bool isAsmGoto() const { return NumLabels > 0; }

  unsigned getNumLabels() const { return NumLabels; }

  // Label entries follow the outputs and inputs in the side tables.
  IdentifierInfo *getLabelIdentifier(unsigned i) const {
    return Names[i + NumOutputs + NumInputs];
  }

  AddrLabelExpr *getLabelExpr(unsigned i) const;
  StringRef getLabelName(unsigned i) const;
  using labels_iterator = CastIterator<AddrLabelExpr>;
  using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
  using labels_range = llvm::iterator_range<labels_iterator>;
  using labels_const_range = llvm::iterator_range<const_labels_iterator>;

  labels_iterator begin_labels() { return &Exprs[0] + NumOutputs + NumInputs; }

  labels_iterator end_labels() {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_range labels() {
    return labels_range(begin_labels(), end_labels());
  }

  const_labels_iterator begin_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  const_labels_iterator end_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_const_range labels() const {
    return labels_const_range(begin_labels(), end_labels());
  }

private:
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      unsigned NumLabels,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);

public:
  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
  int getNamedOperand(StringRef SymbolicName) const;

  StringRef getClobber(unsigned i) const;

  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }

  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};

/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation LBraceLoc, EndLoc;
  StringRef AsmStr;

  unsigned NumAsmToks = 0;

  Token *AsmToks = nullptr;
  StringRef *Constraints = nullptr;
  StringRef *Clobbers = nullptr;

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints,
            ArrayRef<Expr*> exprs, StringRef asmstr,
            ArrayRef<StringRef> clobbers, SourceLocation endloc);

  /// Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  // Braced form: __asm { ... } (vs. single-statement __asm mov eax, ebx).
  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//
  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Input constraints follow the NumOutputs output constraints.
  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Other ---===//

  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }

  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }

  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);

public:
  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};

/// Represents a __except block of a Windows structured exception handler.
class SEHExceptStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  Stmt *Children[2];

  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}

public:
  static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }

  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};

/// Represents a __finally block of a Windows structured exception handler.
class SEHFinallyStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  Stmt *Block;

  SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}

public:
  static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getEndLoc(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block,&Block+1);
  }

  const_child_range children() const {
    return const_child_range(&Block, &Block + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};

/// Represents a __try block paired with either a __except or __finally
/// handler (also models C++ 'try' when IsCXXTry is set).
class SEHTryStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  bool IsCXXTry;
  SourceLocation TryLoc;
  Stmt *Children[2];

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler);

  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getBeginLoc() const LLVM_READONLY {
    return getTryLoc();
  }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};

/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;

public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators (__leave has no children).
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
// (continuation of CapturedStmt)
  enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_ByCopy,
    VCK_VLAType,
  };

  /// Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
  class Capture {
    llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
    SourceLocation Loc;

  public:
    friend class ASTStmtReader;

    /// Create a new capture.
    ///
    /// \param Loc The source location associated with this capture.
    ///
    /// \param Kind The kind of capture (this, ByRef, ...).
    ///
    /// \param Var The variable being captured, or null if capturing this.
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr);

    /// Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const;

    /// Retrieve the source location at which the variable or 'this' was
    /// first used.
    SourceLocation getLocation() const { return Loc; }

    /// Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }

    /// Determine whether this capture handles a variable (by reference).
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }

    /// Determine whether this capture handles a variable by copy.
    bool capturesVariableByCopy() const {
      return getCaptureKind() == VCK_ByCopy;
    }

    /// Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }

    /// Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable.
    VarDecl *getCapturedVar() const;
  };

private:
  /// The number of variable captured, including 'this'.
  unsigned NumCaptures;

  /// The pointer part is the implicit the outlined function and the
  /// int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;

  /// The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl = nullptr;

  /// Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);

  /// Construct an empty captured statement.
  CapturedStmt(EmptyShell Empty, unsigned NumCaptures);

  // Trailing storage: capture-init expressions followed by the captured
  // statement itself (slot NumCaptures), laid out directly after the object.
  Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }

  Stmt *const *getStoredStmts() const {
    return reinterpret_cast<Stmt *const *>(this + 1);
  }

  Capture *getStoredCaptures() const;

  void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }

public:
  friend class ASTStmtReader;

  static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
                              CapturedRegionKind Kind,
                              ArrayRef<Capture> Captures,
                              ArrayRef<Expr *> CaptureInits,
                              CapturedDecl *CD, RecordDecl *RD);

  static CapturedStmt *CreateDeserialized(const ASTContext &Context,
                                          unsigned NumCaptures);

  /// Retrieve the statement being captured.
  Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
  const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }

  /// Retrieve the outlined function declaration.
  CapturedDecl *getCapturedDecl();
  const CapturedDecl *getCapturedDecl() const;

  /// Set the outlined function declaration.
  void setCapturedDecl(CapturedDecl *D);

  /// Retrieve the captured region kind.
  CapturedRegionKind getCapturedRegionKind() const;

  /// Set the captured region kind.
  void setCapturedRegionKind(CapturedRegionKind Kind);

  /// Retrieve the record declaration for captured variables.
  const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }

  /// Set the record declaration for captured variables.
  void setCapturedRecordDecl(RecordDecl *D) {
    assert(D && "null RecordDecl");
    TheRecordDecl = D;
  }

  /// True if this variable has been captured.
  bool capturesVariable(const VarDecl *Var) const;

  /// An iterator that walks over the captures.
  using capture_iterator = Capture *;
  using const_capture_iterator = const Capture *;
  using capture_range = llvm::iterator_range<capture_iterator>;
  using capture_const_range = llvm::iterator_range<const_capture_iterator>;

  capture_range captures() {
    return capture_range(capture_begin(), capture_end());
  }
  capture_const_range captures() const {
    return capture_const_range(capture_begin(), capture_end());
  }

  /// Retrieve an iterator pointing to the first capture.
  capture_iterator capture_begin() { return getStoredCaptures(); }
  const_capture_iterator capture_begin() const { return getStoredCaptures(); }

  /// Retrieve an iterator pointing past the end of the sequence of
  /// captures.
  capture_iterator capture_end() const {
    return getStoredCaptures() + NumCaptures;
  }

  /// Retrieve the number of captures, including 'this'.
  unsigned capture_size() const { return NumCaptures; }

  /// Iterator that walks over the capture initialization arguments.
  using capture_init_iterator = Expr **;
  using capture_init_range = llvm::iterator_range<capture_init_iterator>;

  /// Const iterator that walks over the capture initialization
  /// arguments.
  using const_capture_init_iterator = Expr *const *;
  using const_capture_init_range =
      llvm::iterator_range<const_capture_init_iterator>;

  capture_init_range capture_inits() {
    return capture_init_range(capture_init_begin(), capture_init_end());
  }

  const_capture_init_range capture_inits() const {
    return const_capture_init_range(capture_init_begin(), capture_init_end());
  }

  /// Retrieve the first initialization argument.
  capture_init_iterator capture_init_begin() {
    return reinterpret_cast<Expr **>(getStoredStmts());
  }

  const_capture_init_iterator capture_init_begin() const {
    return reinterpret_cast<Expr *const *>(getStoredStmts());
  }

  /// Retrieve the iterator pointing one past the last initialization
  /// argument.
  capture_init_iterator capture_init_end() {
    return capture_init_begin() + NumCaptures;
  }

  const_capture_init_iterator capture_init_end() const {
    return capture_init_begin() + NumCaptures;
  }

  SourceLocation getBeginLoc() const LLVM_READONLY {
    return getCapturedStmt()->getBeginLoc();
  }

  SourceLocation getEndLoc() const LLVM_READONLY {
    return getCapturedStmt()->getEndLoc();
  }

  SourceRange getSourceRange() const LLVM_READONLY {
    return getCapturedStmt()->getSourceRange();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CapturedStmtClass;
  }

  child_range children();

  const_child_range children() const;
};

} // namespace clang

#endif // LLVM_CLANG_AST_STMT_H
Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H

#include "../InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

/** \internal
  * Get/set the user-requested maximum thread count.  The count is kept in a
  * function-local static, so the first call initializes it to -1 ("not set",
  * i.e. defer to OpenMP's omp_get_max_threads). */
inline void manage_multi_threading(Action action, int* v)
{
  static int m_maxThreads = -1;
  EIGEN_UNUSED_VARIABLE(m_maxThreads)

  if(action==SetAction)
  {
    eigen_internal_assert(v!=0);
    m_maxThreads = *v;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(v!=0);
    #ifdef EIGEN_HAS_OPENMP
    if(m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
    #else
    // Without OpenMP, Eigen always runs single-threaded.
    *v = 1;
    #endif
  }
  else
  {
    eigen_internal_assert(false);
  }
}

}

/** Must be called first when calling Eigen from multiple threads.
  * Touches the lazily-initialized global state (thread count, cache sizes)
  * so it is created before any concurrent use. */
inline void initParallel()
{
  int nbt;
  internal::manage_multi_threading(GetAction, &nbt);
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}

/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int ret;
  internal::manage_multi_threading(GetAction, &ret);
  return ret;
}

/** Sets the max number of threads reserved for Eigen
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  internal::manage_multi_threading(SetAction, &v);
}

namespace internal {

/** \internal Per-thread bookkeeping shared between GEMM worker threads.
  * 'sync'/'users' are atomics only when OpenMP is enabled, since that is the
  * only configuration in which they are accessed concurrently. */
template<typename Index> struct GemmParallelInfo
{
#ifdef EIGEN_HAS_OPENMP
  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}
  std::atomic<Index> sync;
  std::atomic<int> users;
#else
  GemmParallelInfo() : lhs_start(0), lhs_length(0) {}
#endif
  Index lhs_start;
  Index lhs_length;
};

/** \internal Run the GEMM functor either sequentially or split across an
  * OpenMP team, depending on compile-time support and runtime heuristics. */
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
  // Without C++11, we have to disable GEMM's parallelization on
  // non x86 architectures because there volatile is not enough for our purpose.
  // See bug 1572.
#if (! defined(EIGEN_HAS_OPENMP)) || defined(EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redesigned anyway.
  EIGEN_UNUSED_VARIABLE(depth);
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0,rows, 0,cols);
#else

  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  //  - the max number of threads we can create is greater than 1
  //  - we are not already in a parallel code
  //  - the sizes are large enough

  // compute the maximal number of threads from the size of the product:
  // This first heuristic takes into account that the product kernel is fully
  // optimized when working with nr columns at once.
  Index size = transpose ? rows : cols;
  Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr);

  // compute the maximal number of threads from the total amount of work:
  double work = static_cast<double>(rows) * static_cast<double>(cols) *
      static_cast<double>(depth);
  double kMinTaskSize = 50000;  // FIXME improve this heuristic.
  pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, static_cast<Index>( work / kMinTaskSize ) ));

  // compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), pb_max_threads);

  // if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
  // then abort multi-threading
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);

  Eigen::initParallel();
  func.initParallelSession(threads);

  if(transpose)
    std::swap(rows,cols);

  ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);

  #pragma omp parallel num_threads(threads)
  {
    Index i = omp_get_thread_num();
    // Note that the actual number of threads might be lower than the number of
    // request ones.
    Index actual_threads = omp_get_num_threads();

    // Column blocks are rounded down to a multiple of 4, row blocks to a
    // multiple of the kernel's mr; the last thread absorbs the remainder.
    Index blockCols = (cols / actual_threads) & ~Index(0x3);
    Index blockRows = (rows / actual_threads);
    blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;

    Index r0 = i*blockRows;
    Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;

    Index c0 = i*blockCols;
    Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;

    info[i].lhs_start = r0;
    info[i].lhs_length = actualBlockRows;

    if(transpose) func(c0, actualBlockCols, 0, rows, info);
    else          func(0, rows, c0, actualBlockCols, info);
  }
#endif
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PARALLELIZER_H
UI_mainConsole.h
#pragma once #define OLC_PGE_APPLICATION #include "olcPixelGameEngine.h" #include "BallCollisionEngine.h" class BallCollision : public olc::PixelGameEngine { public: BallCollision() { sAppName = "BallCollision"; } private: BallCollisionEngine* _engine; Ball* _selectedBall; Line* _selectedLine; bool _isStartLine; inline bool _isPointInBall(float x1, float y1, float r1, float x, float y) { return fabs((x1 - x) * (x1 - x) + (y1 - y) * (y1 - y)) < (r1 * r1); } inline void _selectAndDragBall() { if (GetMouse(olc::Mouse::LEFT).bPressed || GetMouse(olc::Mouse::RIGHT).bPressed) { _selectedBall = nullptr; for (auto& ball : _engine->Balls) { if (_isPointInBall(ball.x, ball.y, ball.r, GetMouseX(), GetMouseY())) { _selectedBall = &ball; break; } } _selectedLine = nullptr; for (auto& line : _engine->Lines) { if (_isPointInBall(line.sx, line.sy, line.r, GetMouseX(), GetMouseY())) { _selectedLine = &line; _isStartLine = true; } } for (auto& line : _engine->Lines) { if (_isPointInBall(line.ex, line.ey, line.r, GetMouseX(), GetMouseY())) { _selectedLine = &line; _isStartLine = false; } } } if (GetMouse(olc::Mouse::LEFT).bHeld) { if (_selectedBall) { _selectedBall->x = GetMouseX(); _selectedBall->y = GetMouseY(); } if (_selectedLine && _isStartLine) { _selectedLine->sx = GetMouseX(); _selectedLine->sy = GetMouseY(); } if (_selectedLine && !_isStartLine) { _selectedLine->ex = GetMouseX(); _selectedLine->ey = GetMouseY(); } } if (GetMouse(olc::Mouse::LEFT).bReleased) { _selectedBall = nullptr; _selectedLine = nullptr; } if (GetMouse(olc::Mouse::RIGHT).bReleased) { if (_selectedBall) { _selectedBall->vx = 20.0f * (_selectedBall->x - (float)GetMouseX()); _selectedBall->vy = 20.0f * (_selectedBall->y - (float)GetMouseY()); } _selectedBall = nullptr; } } public: bool OnUserCreate() override { _engine = new BallCollisionEngine(ScreenWidth(), ScreenHeight(), 700); _engine->Create(); return true; } bool OnUserUpdate(float fElapsedTime) override { _selectAndDragBall(); 
_engine->Update(fElapsedTime); // clear the screen FillRect(0, 0, ScreenWidth(), ScreenHeight(), olc::Pixel(0, 0, 0)); // draw ball #pragma omp parallel for for (int i = 0; i < _engine->Balls.size(); i++) { int r = std::min(sqrt(_engine->Balls[i].vx * _engine->Balls[i].vx + _engine->Balls[i].vy * _engine->Balls[i].vy) + 100.0f, 255.0f); FillCircle(_engine->Balls[i].x, _engine->Balls[i].y, _engine->Balls[i].r, olc::Pixel(r, 255 - r, 255 - r)); } // draw line for (auto& line : _engine->Lines) { float nx = -(line.ey - line.sy); float ny = (line.ex - line.sx); float d = sqrt(nx * nx + ny * ny); nx /= d; ny /= d; DrawLine((line.sx + nx * line.r), (line.sy + ny * line.r), (line.ex + nx * line.r), (line.ey + ny * line.r), olc::GREY); DrawLine((line.sx - nx * line.r), (line.sy - ny * line.r), (line.ex - nx * line.r), (line.ey - ny * line.r), olc::GREY); FillCircle(line.sx, line.sy, line.r, olc::GREY); FillCircle(line.ex, line.ey, line.r, olc::GREY); } // draw right click line if (_selectedBall) { DrawLine(_selectedBall->x, _selectedBall->y, GetMouseX(), GetMouseY(), olc::BLUE); } return true; } }; void RunBallCollision() { BallCollision ball_collision; if (ball_collision.Construct(1536, 1024, 1, 1)) ball_collision.Start(); }
layerramdistancetransform.h
/********************************************************************************* * * Inviwo - Interactive Visualization Workshop * * Copyright (c) 2017-2020 Inviwo Foundation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *********************************************************************************/
#pragma once

#include <modules/base/basemoduledefine.h>
#include <inviwo/core/common/inviwo.h>
#include <inviwo/core/util/indexmapper.h>

#include <inviwo/core/datastructures/image/layer.h>
#include <inviwo/core/datastructures/image/layerram.h>
#include <inviwo/core/datastructures/image/layerramprecision.h>

#ifdef IVW_USE_OPENMP
#include <omp.h>
#endif

namespace inviwo {

namespace util {

/**
 * Implementation of Euclidean Distance Transform according to Saito's algorithm:
 * T. Saito and J.I. Toriwaki. New algorithms for Euclidean distance transformations
 * of an n-dimensional digitized picture with applications. Pattern Recognition, 27(11).
 * pp. 1551-1565, 1994.
 * http://www.cs.jhu.edu/~misha/ReadingSeminar/Papers/Saito94.pdf
 *
 * Calculates the distance in base mat space
 *
 * Predicate is a function of type (const T &value) -> bool to decide if a value in the input
 * is a "feature".
 *
 * ValueTransform is a function of type (const U& squaredDist) -> U that is applied to all
 * squared distance values at the end of the calculation.
 *
 * ProgressCallback is a function of type (double progress) -> void that is called with a value
 * from 0 to 1 to indicate the progress of the calculation.
*/
template <typename T, typename U, typename Predicate, typename ValueTransform,
          typename ProgressCallback>
void layerRAMDistanceTransform(const LayerRAMPrecision<T>* inLayer,
                               LayerRAMPrecision<U>* outDistanceField, const Matrix<2, U> basis,
                               const size2_t upsample, Predicate predicate,
                               ValueTransform valueTransform, ProgressCallback callback);

template <typename T, typename U>
void layerRAMDistanceTransform(const LayerRAMPrecision<T>* inVolume,
                               LayerRAMPrecision<U>* outDistanceField, const Matrix<2, U> basis,
                               const size2_t upsample);

template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void layerDistanceTransform(const Layer* inLayer, LayerRAMPrecision<U>* outDistanceField,
                            const size2_t upsample, Predicate predicate,
                            ValueTransform valueTransform, ProgressCallback callback);

template <typename U, typename ProgressCallback>
void layerDistanceTransform(const Layer* inLayer, LayerRAMPrecision<U>* outDistanceField,
                            const size2_t upsample, double threshold, bool normalize, bool flip,
                            bool square, double scale, ProgressCallback callback);

template <typename U>
void layerDistanceTransform(const Layer* inLayer, LayerRAMPrecision<U>* outDistanceField,
                            const size2_t upsample, double threshold, bool normalize, bool flip,
                            bool square, double scale);

}  // namespace util

// Two-pass (row scan, then column scan) squared Euclidean distance transform.
// Distances are measured in basis space; `outDistanceField` must be exactly
// `upsample` times larger than `inLayer` per axis (checked below).
// NOTE(review): the 1.0f literals below suggest U is expected to be a
// floating-point type — confirm with callers.
template <typename T, typename U, typename Predicate, typename ValueTransform,
          typename ProgressCallback>
void util::layerRAMDistanceTransform(const LayerRAMPrecision<T>* inLayer,
                                     LayerRAMPrecision<U>* outDistanceField,
                                     const Matrix<2, U> basis, const size2_t upsample,
                                     Predicate predicate, ValueTransform valueTransform,
                                     ProgressCallback callback) {
#ifdef IVW_USE_OPENMP
    omp_set_num_threads(std::thread::hardware_concurrency());
#endif
    using int64 = glm::int64;

    auto square = [](auto a) { return a * a; };

    callback(0.0);

    const T* src = inLayer->getDataTyped();
    U* dst = outDistanceField->getDataTyped();
    const i64vec2 srcDim{inLayer->getDimensions()};
    const i64vec2 dstDim{outDistanceField->getDimensions()};
    const i64vec2 sm{upsample};

    // Per-axis squared voxel size in basis space: diag(B^T B) / dim^2.
    const auto squareBasis = glm::transpose(basis) * basis;
    const Vector<2, U> squareBasisDiag{squareBasis[0][0], squareBasis[1][1]};
    const Vector<2, U> squareVoxelSize{squareBasisDiag / Vector<2, U>{dstDim * dstDim}};
    const Vector<2, U> invSquareVoxelSize{Vector<2, U>{1.0f} / squareVoxelSize};

    {
        // The per-axis decomposition below is only valid for an orthogonal
        // basis; warn (but proceed) if off-diagonal terms are significant.
        const auto maxdist = glm::compMax(squareBasisDiag);
        bool orthogonal = true;
        for (size_t i = 0; i < squareBasis.length(); i++) {
            for (size_t j = 0; j < squareBasis.length(); j++) {
                if (i != j) {
                    if (std::abs(squareBasis[i][j]) > 10.0e-8 * maxdist) {
                        orthogonal = false;
                        break;
                    }
                }
            }
        }
        if (!orthogonal) {
            LogWarnCustom(
                "layerRAMDistanceTransform",
                "Calculating the distance transform on a non-orthogonal layer will not give "
                "correct values");
        }
    }

    if (srcDim * sm != dstDim) {
        throw Exception(
            "DistanceTransformRAM: Dimensions does not match src = " + toString(srcDim) +
                " dst = " + toString(dstDim) + " scaling = " + toString(sm),
            IVW_CONTEXT_CUSTOM("layerRAMDistanceTransform"));
    }

    util::IndexMapper<2, int64> srcInd(srcDim);
    util::IndexMapper<2, int64> dstInd(dstDim);

    // Destination coordinates are divided down to source coordinates, so each
    // source texel is replicated across an sm.x-by-sm.y block.
    auto is_feature = [&](const int64 x, const int64 y) {
        return predicate(src[srcInd(x / sm.x, y / sm.y)]);
    };

    // first pass, forward and backward scan along x
    // result: min distance in x direction
#ifdef IVW_USE_OPENMP
#pragma omp parallel for
#endif
    for (int64 y = 0; y < dstDim.y; ++y) {
        // forward
        U dist = static_cast<U>(dstDim.x);
        for (int64 x = 0; x < dstDim.x; ++x) {
            if (!is_feature(x, y)) {
                ++dist;
            } else {
                dist = U(0);
            }
            dst[dstInd(x, y)] = squareVoxelSize.x * square(dist);
        }
        // backward
        dist = static_cast<U>(dstDim.x);
        for (int64 x = dstDim.x - 1; x >= 0; --x) {
            if (!is_feature(x, y)) {
                ++dist;
            } else {
                dist = U(0);
            }
            dst[dstInd(x, y)] = std::min<U>(dst[dstInd(x, y)], squareVoxelSize.x * square(dist));
        }
    }

    // second pass, scan y direction
    // for each voxel v(x,y,z) find min_i(data(x,i,z) + (y - i)^2), 0 <= i < dimY
    // result: min distance in x and y direction
    callback(0.45);
#ifdef IVW_USE_OPENMP
#pragma omp parallel
#endif
    {
        std::vector<U> buff;
        buff.resize(dstDim.y);
#ifdef IVW_USE_OPENMP
#pragma omp for
#endif
        for (int64 x = 0; x < dstDim.x; ++x) {
            // cache column data into temporary buffer
            for (int64 y = 0; y < dstDim.y; ++y) {
                buff[y] = dst[dstInd(x, y)];
            }
            for (int64 y = 0; y < dstDim.y; ++y) {
                auto d = buff[y];
                if (d != U(0)) {
                    // rMax bounds the search radius: candidates further away
                    // than sqrt(d) cannot improve the current minimum.
                    // NOTE(review): rStart uses y - 1 rather than y, so the
                    // candidate at row 0 appears unreachable when y >= 1 and
                    // rMax >= y — looks like a possible off-by-one; confirm
                    // against the 3D volume implementation.
                    const auto rMax = static_cast<int64>(std::sqrt(d * invSquareVoxelSize.y)) + 1;
                    const auto rStart = std::min(rMax, y - 1);
                    const auto rEnd = std::min(rMax, dstDim.y - y);
                    for (int64 n = -rStart; n < rEnd; ++n) {
                        const auto w = buff[y + n] + squareVoxelSize.y * square(n);
                        if (w < d) d = w;
                    }
                }
                dst[dstInd(x, y)] = d;
            }
        }
    }

    // scale data: apply the user transform (e.g. sqrt, scaling) to every
    // squared-distance value.
    callback(0.9);
    const int64 layerSize = dstDim.x * dstDim.y;
#ifdef IVW_USE_OPENMP
#pragma omp parallel for
#endif
    for (int64 i = 0; i < layerSize; ++i) {
        dst[i] = valueTransform(dst[i]);
    }

    callback(1.0);
}

// Convenience overload: features are values above 0.5 (normalized), the
// result holds plain (non-squared) distances, progress is ignored.
template <typename T, typename U>
void util::layerRAMDistanceTransform(const LayerRAMPrecision<T>* inLayer,
                                     LayerRAMPrecision<U>* outDistanceField,
                                     const Matrix<2, U> basis, const size2_t upsample) {
    util::layerRAMDistanceTransform(
        inLayer, outDistanceField, basis, upsample,
        [](const T& val) { return util::glm_convert_normalized<double>(val) > 0.5; },
        [](const U& squareDist) { return static_cast<U>(std::sqrt(static_cast<double>(squareDist))); },
        [](double f) {});
}

// Layer-level entry point: dispatches on the layer's scalar data type and
// forwards to the typed RAM implementation above.
template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback>
void util::layerDistanceTransform(const Layer* inLayer, LayerRAMPrecision<U>* outDistanceField,
                                  const size2_t upsample, Predicate predicate,
                                  ValueTransform valueTransform, ProgressCallback callback) {
    const auto inputLayerRep = inLayer->getRepresentation<LayerRAM>();
    inputLayerRep->dispatch<void, dispatching::filter::Scalars>([&](const auto lrprecision) {
        layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(), upsample,
                                  predicate, valueTransform, callback);
    });
}
// Flag-driven entry point. The three booleans select one of eight
// predicate/transform combinations:
//  - normalize: compare `threshold` against the value converted to [0,1]
//    (glm_convert_normalized) instead of the raw value;
//  - flip: features are values BELOW the threshold instead of above;
//  - square: keep squared distances (identity transform) instead of sqrt.
// `scale` multiplies every output value.
template <typename U, typename ProgressCallback>
void util::layerDistanceTransform(const Layer* inLayer, LayerRAMPrecision<U>* outDistanceField,
                                  const size2_t upsample, double threshold, bool normalize,
                                  bool flip, bool square, double scale,
                                  ProgressCallback progress) {
    const auto inputLayerRep = inLayer->getRepresentation<LayerRAM>();
    inputLayerRep->dispatch<void, dispatching::filter::Scalars>([&](const auto lrprecision) {
        using ValueType = util::PrecisionValueType<decltype(lrprecision)>;

        // Raw-value predicates (used when normalize == false).
        const auto predicateIn = [threshold](const ValueType& val) { return val < threshold; };
        const auto predicateOut = [threshold](const ValueType& val) { return val > threshold; };

        // Normalized-value predicates (used when normalize == true).
        const auto normPredicateIn = [threshold](const ValueType& val) {
            return util::glm_convert_normalized<double>(val) < threshold;
        };
        const auto normPredicateOut = [threshold](const ValueType& val) {
            return util::glm_convert_normalized<double>(val) > threshold;
        };

        // Output transforms: squared distance (identity) vs. plain distance.
        const auto valTransIdent = [scale](const float& squareDist) {
            return static_cast<float>(scale * squareDist);
        };
        const auto valTransSqrt = [scale](const float& squareDist) {
            return static_cast<float>(scale * std::sqrt(squareDist));
        };

        if (normalize && square && flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, normPredicateIn, valTransIdent, progress);
        } else if (normalize && square && !flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, normPredicateOut, valTransIdent, progress);
        } else if (normalize && !square && flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, normPredicateIn, valTransSqrt, progress);
        } else if (normalize && !square && !flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, normPredicateOut, valTransSqrt, progress);
        } else if (!normalize && square && flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, predicateIn, valTransIdent, progress);
        } else if (!normalize && square && !flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, predicateOut, valTransIdent, progress);
        } else if (!normalize && !square && flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, predicateIn, valTransSqrt, progress);
        } else if (!normalize && !square && !flip) {
            util::layerRAMDistanceTransform(lrprecision, outDistanceField, inLayer->getBasis(),
                                            upsample, predicateOut, valTransSqrt, progress);
        }
    });
}

// Convenience overload without a progress callback.
template <typename U>
void util::layerDistanceTransform(const Layer* inLayer, LayerRAMPrecision<U>* outDistanceField,
                                  const size2_t upsample, double threshold, bool normalize,
                                  bool flip, bool square, double scale) {
    util::layerDistanceTransform(inLayer, outDistanceField, upsample, threshold, normalize, flip,
                                 square, scale, [](double) {});
}

}  // namespace inviwo
FunctorsOpenMP.h
//============================================================================ // Copyright (c) Kitware, Inc. // All rights reserved. // See LICENSE.txt for details. // // This software is distributed WITHOUT ANY WARRANTY; without even // the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR // PURPOSE. See the above copyright notice for more information. //============================================================================ #ifndef vtk_m_cont_openmp_internal_FunctorsOpenMP_h #define vtk_m_cont_openmp_internal_FunctorsOpenMP_h #include <vtkm/cont/openmp/internal/DeviceAdapterTagOpenMP.h> #include <vtkm/cont/internal/FunctorsGeneral.h> #include <vtkm/BinaryOperators.h> #include <vtkm/BinaryPredicates.h> #include <vtkm/Pair.h> #include <vtkm/Types.h> #include <vtkm/cont/ArrayHandle.h> #include <vtkm/cont/ErrorExecution.h> #include <omp.h> #include <algorithm> #include <type_traits> #include <vector> // Wrap all '#pragma omp ...' calls in this macro so we can disable them in // non-omp builds and avoid a multitude of 'ignoring pragma..." warnings. #ifdef _OPENMP #define VTKM_OPENMP_DIRECTIVE_IMPL(fullDir) _Pragma(#fullDir) #define VTKM_OPENMP_DIRECTIVE(dir) VTKM_OPENMP_DIRECTIVE_IMPL(omp dir) #else // _OPENMP #define VTKM_OPENMP_DIRECTIVE(directive) #endif // _OPENMP // See "OpenMP data sharing" section of // https://www.gnu.org/software/gcc/gcc-9/porting_to.html. OpenMP broke // backwards compatibility regarding const variable handling. // tl;dr, put all const variables accessed from openmp blocks in a // VTKM_OPENMP_SHARED_CONST(var1, var2, ...) macro. This will do The Right Thing // on all gcc. #if defined(VTKM_GCC) && (__GNUC__ < 9) #define VTKM_OPENMP_SHARED_CONST(...) #else #define VTKM_OPENMP_SHARED_CONST(...) shared(__VA_ARGS__) #endif // When defined, supported type / operator combinations will use the OpenMP // reduction(...) clause. 
// Otherwise, all reductions use the general
// implementation with a manual reduction once the threads complete.
// I don't know how, but the benchmarks currently perform better without the
// specializations.
//#define VTKM_OPENMP_USE_NATIVE_REDUCTION

namespace vtkm
{
namespace cont
{
namespace openmp
{

// Sizing constants used for chunking and false-sharing padding below.
constexpr static vtkm::Id VTKM_CACHE_LINE_SIZE = 64;
constexpr static vtkm::Id VTKM_PAGE_SIZE = 4096;

// Returns ceil(num/den) for integral types
template <typename T>
static constexpr T CeilDivide(const T& numerator, const T& denominator)
{
  return (numerator + denominator - 1) / denominator;
}

// Computes the number of values per chunk. Note that numChunks + chunkSize may
// exceed numVals, so be sure to check upper limits.
// Chunks are sized to whole memory pages so consecutive threads touch
// disjoint pages.
static void ComputeChunkSize(const vtkm::Id numVals,
                             const vtkm::Id numThreads,
                             const vtkm::Id chunksPerThread,
                             const vtkm::Id bytesPerValue,
                             vtkm::Id& numChunks,
                             vtkm::Id& valuesPerChunk)
{
  // try to evenly distribute pages across chunks:
  const vtkm::Id bytesIn = numVals * bytesPerValue;
  const vtkm::Id pagesIn = CeilDivide(bytesIn, VTKM_PAGE_SIZE);
  // If we don't have enough pages to honor chunksPerThread, ignore it:
  numChunks = (pagesIn > numThreads * chunksPerThread) ? numThreads * chunksPerThread : numThreads;
  const vtkm::Id pagesPerChunk = CeilDivide(pagesIn, numChunks);
  valuesPerChunk = CeilDivide(pagesPerChunk * VTKM_PAGE_SIZE, bytesPerValue);
}

// Same-type copy: defer to std::copy so the implementation can use memmove.
template <typename T, typename U>
static void DoCopy(T src, U dst, vtkm::Id numVals, std::true_type)
{
  if (numVals)
  {
    std::copy(src, src + numVals, dst);
  }
}

// Don't use std::copy when type conversion is required because MSVC.
template <typename InIterT, typename OutIterT> static void DoCopy(InIterT inIter, OutIterT outIter, vtkm::Id numVals, std::false_type) { using ValueType = typename std::iterator_traits<OutIterT>::value_type; for (vtkm::Id i = 0; i < numVals; ++i) { *(outIter++) = static_cast<ValueType>(*(inIter++)); } } template <typename InIterT, typename OutIterT> static void DoCopy(InIterT inIter, OutIterT outIter, vtkm::Id numVals) { using InValueType = typename std::iterator_traits<InIterT>::value_type; using OutValueType = typename std::iterator_traits<OutIterT>::value_type; DoCopy(inIter, outIter, numVals, std::is_same<InValueType, OutValueType>()); } template <typename InPortalT, typename OutPortalT> static void CopyHelper(InPortalT inPortal, OutPortalT outPortal, vtkm::Id inStart, vtkm::Id outStart, vtkm::Id numVals) { using InValueT = typename InPortalT::ValueType; using OutValueT = typename OutPortalT::ValueType; constexpr auto isSame = std::is_same<InValueT, OutValueT>(); auto inIter = vtkm::cont::ArrayPortalToIteratorBegin(inPortal) + inStart; auto outIter = vtkm::cont::ArrayPortalToIteratorBegin(outPortal) + outStart; vtkm::Id valuesPerChunk; VTKM_OPENMP_DIRECTIVE(parallel default(none) shared(inIter, outIter, valuesPerChunk, numVals) VTKM_OPENMP_SHARED_CONST(isSame)) { VTKM_OPENMP_DIRECTIVE(single) { // Evenly distribute full pages to all threads. We manually chunk the // data here so that we can exploit std::copy's memmove optimizations. 
vtkm::Id numChunks; ComputeChunkSize( numVals, omp_get_num_threads(), 8, sizeof(InValueT), numChunks, valuesPerChunk); } VTKM_OPENMP_DIRECTIVE(for schedule(static)) for (vtkm::Id i = 0; i < numVals; i += valuesPerChunk) { vtkm::Id chunkSize = std::min(numVals - i, valuesPerChunk); DoCopy(inIter + i, outIter + i, chunkSize, isSame); } } } struct CopyIfHelper { vtkm::Id NumValues; vtkm::Id NumThreads; vtkm::Id ValueSize; vtkm::Id NumChunks; vtkm::Id ChunkSize; std::vector<vtkm::Id> EndIds; CopyIfHelper() = default; void Initialize(vtkm::Id numValues, vtkm::Id valueSize) { this->NumValues = numValues; this->NumThreads = static_cast<vtkm::Id>(omp_get_num_threads()); this->ValueSize = valueSize; // Evenly distribute pages across the threads. We manually chunk the // data here so that we can exploit std::copy's memmove optimizations. ComputeChunkSize( this->NumValues, this->NumThreads, 8, valueSize, this->NumChunks, this->ChunkSize); this->EndIds.resize(static_cast<std::size_t>(this->NumChunks)); } template <typename InIterT, typename StencilIterT, typename OutIterT, typename PredicateT> void CopyIf(InIterT inIter, StencilIterT stencilIter, OutIterT outIter, PredicateT pred, vtkm::Id chunk) { vtkm::Id startPos = std::min(chunk * this->ChunkSize, this->NumValues); vtkm::Id endPos = std::min((chunk + 1) * this->ChunkSize, this->NumValues); vtkm::Id outPos = startPos; for (vtkm::Id inPos = startPos; inPos < endPos; ++inPos) { if (pred(stencilIter[inPos])) { outIter[outPos++] = inIter[inPos]; } } this->EndIds[static_cast<std::size_t>(chunk)] = outPos; } template <typename OutIterT> vtkm::Id Reduce(OutIterT data) { vtkm::Id endPos = this->EndIds.front(); for (vtkm::Id i = 1; i < this->NumChunks; ++i) { vtkm::Id chunkStart = std::min(i * this->ChunkSize, this->NumValues); vtkm::Id chunkEnd = this->EndIds[static_cast<std::size_t>(i)]; vtkm::Id numValuesToCopy = chunkEnd - chunkStart; if (numValuesToCopy > 0 && chunkStart != endPos) { std::copy(data + chunkStart, data + 
chunkEnd, data + endPos); } endPos += numValuesToCopy; } return endPos; } }; #ifdef VTKM_OPENMP_USE_NATIVE_REDUCTION // OpenMP only declares reduction operations for primitive types. This utility // detects if a type T is supported. template <typename T> struct OpenMPReductionSupported : std::false_type { }; template <> struct OpenMPReductionSupported<Int8> : std::true_type { }; template <> struct OpenMPReductionSupported<UInt8> : std::true_type { }; template <> struct OpenMPReductionSupported<Int16> : std::true_type { }; template <> struct OpenMPReductionSupported<UInt16> : std::true_type { }; template <> struct OpenMPReductionSupported<Int32> : std::true_type { }; template <> struct OpenMPReductionSupported<UInt32> : std::true_type { }; template <> struct OpenMPReductionSupported<Int64> : std::true_type { }; template <> struct OpenMPReductionSupported<UInt64> : std::true_type { }; template <> struct OpenMPReductionSupported<Float32> : std::true_type { }; template <> struct OpenMPReductionSupported<Float64> : std::true_type { }; #else template <typename T> using OpenMPReductionSupported = std::false_type; #endif // VTKM_OPENMP_USE_NATIVE_REDUCTION struct ReduceHelper { // std::is_integral, but adapted to see through vecs and pairs. 
// IsIntegral: like std::is_integral, but also true for Vec/Pair of integrals,
// used to pick the (non-unrolled) integer reduction path.
template <typename T>
struct IsIntegral : public std::is_integral<T>
{
};

template <typename T, vtkm::IdComponent Size>
struct IsIntegral<vtkm::Vec<T, Size>> : public std::is_integral<T>
{
};

template <typename T, typename U>
struct IsIntegral<vtkm::Pair<T, U>>
  : public std::integral_constant<bool, std::is_integral<T>::value && std::is_integral<U>::value>
{
};

// Generic implementation:
// Each thread reduces a private partial result; partials are combined
// serially after the parallel region. Falls back to a fully serial loop when
// there are fewer than 2 values per thread.
template <typename PortalT, typename ReturnType, typename Functor>
static ReturnType Execute(PortalT portal, ReturnType init, Functor functorIn, std::false_type)
{
  internal::WrappedBinaryOperator<ReturnType, Functor> f(functorIn);
  const vtkm::Id numVals = portal.GetNumberOfValues();
  auto data = vtkm::cont::ArrayPortalToIteratorBegin(portal);

  bool doParallel = false;
  int numThreads = 0;
  std::unique_ptr<ReturnType[]> threadData;

  VTKM_OPENMP_DIRECTIVE(parallel default(none) firstprivate(f) shared(
    data, doParallel, numThreads, threadData) VTKM_OPENMP_SHARED_CONST(numVals))
  {
    int tid = omp_get_thread_num();

    // One thread decides whether parallel reduction pays off and allocates
    // the per-thread partial-result array (implicit barrier at end of single).
    VTKM_OPENMP_DIRECTIVE(single)
    {
      numThreads = omp_get_num_threads();
      if (numVals >= numThreads * 2)
      {
        doParallel = true;
        threadData.reset(new ReturnType[static_cast<std::size_t>(numThreads)]);
      }
    }

    if (doParallel)
    {
      // Static dispatch to unroll non-integral types:
      const ReturnType localResult = ReduceHelper::DoParallelReduction<ReturnType>(
        data, numVals, tid, numThreads, f, IsIntegral<ReturnType>{});
      threadData[static_cast<std::size_t>(tid)] = localResult;
    }
  } // end parallel

  if (doParallel)
  {
    // do the final reduction serially:
    for (size_t i = 0; i < static_cast<size_t>(numThreads); ++i)
    {
      init = f(init, threadData[i]);
    }
  }
  else
  {
    // Not enough threads. Do the entire reduction in serial:
    for (vtkm::Id i = 0; i < numVals; ++i)
    {
      init = f(init, data[i]);
    }
  }

  return init;
}

// non-integer reduction: unroll loop manually.
// This gives faster code for floats and non-trivial types.
template <typename ReturnType, typename IterType, typename FunctorType> static ReturnType DoParallelReduction(IterType data, vtkm::Id numVals, int tid, int numThreads, FunctorType f, std::false_type /* isIntegral */) { // Use the first (numThreads*2) values for initializing: ReturnType accum = f(data[2 * tid], data[2 * tid + 1]); vtkm::Id i = numThreads * 2; const vtkm::Id unrollEnd = ((numVals / 4) * 4) - 4; VTKM_OPENMP_DIRECTIVE(for schedule(static)) for (i = numThreads * 2; i < unrollEnd; i += 4) { const auto t1 = f(data[i], data[i + 1]); const auto t2 = f(data[i + 2], data[i + 3]); accum = f(accum, t1); accum = f(accum, t2); } // Let the last thread mop up any remaining values as it would // have just accessed the adjacent data if (tid == numThreads - 1) { for (i = unrollEnd; i < numVals; ++i) { accum = f(accum, data[i]); } } return accum; } // Integer reduction: no unrolling. Ints vectorize easily and unrolling can // hurt performance. template <typename ReturnType, typename IterType, typename FunctorType> static ReturnType DoParallelReduction(IterType data, vtkm::Id numVals, int tid, int numThreads, FunctorType f, std::true_type /* isIntegral */) { // Use the first (numThreads*2) values for initializing: ReturnType accum = f(data[2 * tid], data[2 * tid + 1]); // Assign each thread chunks of the remaining values for local reduction VTKM_OPENMP_DIRECTIVE(for schedule(static)) for (vtkm::Id i = numThreads * 2; i < numVals; i++) { accum = f(accum, data[i]); } return accum; } #ifdef VTKM_OPENMP_USE_NATIVE_REDUCTION // Specialize for vtkm functors with OpenMP special cases: #define VTKM_OPENMP_SPECIALIZE_REDUCE1(FunctorType, PragmaString) \ template <typename PortalT, typename ReturnType> \ static ReturnType Execute( \ PortalT portal, ReturnType value, FunctorType functorIn, std::true_type) \ { \ const vtkm::Id numValues = portal.GetNumberOfValues(); \ internal::WrappedBinaryOperator<ReturnType, FunctorType> f(functorIn); \ _Pragma(#PragmaString) for (vtkm::Id i = 
0; i < numValues; ++i) \ { \ value = f(value, portal.Get(i)); \ } \ return value; \ } // Constructing the pragma string inside the _Pragma call doesn't work so // we jump through a hoop: #define VTKM_OPENMP_SPECIALIZE_REDUCE(FunctorType, Operator) \ VTKM_OPENMP_SPECIALIZE_REDUCE1(FunctorType, "omp parallel for reduction(" #Operator ":value)") // + (Add, Sum) VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Add, +) VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Sum, +) // * (Multiply, Product) VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Multiply, *) VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Product, *) // - (Subtract) VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Subtract, -) // & (BitwiseAnd) VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::BitwiseAnd, &) // | (BitwiseOr) VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::BitwiseOr, |) // ^ (BitwiseXor) VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::BitwiseXor, ^) // && (LogicalAnd) VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::LogicalAnd, &&) // || (LogicalOr) VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::LogicalOr, ||) // min (Minimum) VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Minimum, min) // max (Maximum) VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Maximum, max) #undef VTKM_OPENMP_SPECIALIZE_REDUCE #undef VTKM_OPENMP_SPECIALIZE_REDUCE1 #endif // VTKM_OPENMP_USE_NATIVE_REDUCTION }; template <typename KeysInArray, typename ValuesInArray, typename KeysOutArray, typename ValuesOutArray, typename BinaryFunctor> void ReduceByKeyHelper(KeysInArray keysInArray, ValuesInArray valuesInArray, KeysOutArray keysOutArray, ValuesOutArray valuesOutArray, BinaryFunctor functor) { using KeyType = typename KeysInArray::ValueType; using ValueType = typename ValuesInArray::ValueType; vtkm::cont::Token token; const vtkm::Id numValues = keysInArray.GetNumberOfValues(); auto keysInPortal = keysInArray.PrepareForInput(DeviceAdapterTagOpenMP(), token); auto valuesInPortal = valuesInArray.PrepareForInput(DeviceAdapterTagOpenMP(), token); auto keysIn = vtkm::cont::ArrayPortalToIteratorBegin(keysInPortal); auto valuesIn = 
vtkm::cont::ArrayPortalToIteratorBegin(valuesInPortal); auto keysOutPortal = keysOutArray.PrepareForOutput(numValues, DeviceAdapterTagOpenMP(), token); auto valuesOutPortal = valuesOutArray.PrepareForOutput(numValues, DeviceAdapterTagOpenMP(), token); auto keysOut = vtkm::cont::ArrayPortalToIteratorBegin(keysOutPortal); auto valuesOut = vtkm::cont::ArrayPortalToIteratorBegin(valuesOutPortal); internal::WrappedBinaryOperator<ValueType, BinaryFunctor> f(functor); vtkm::Id outIdx = 0; VTKM_OPENMP_DIRECTIVE(parallel default(none) firstprivate(keysIn, valuesIn, keysOut, valuesOut, f) shared(outIdx) VTKM_OPENMP_SHARED_CONST(numValues)) { int tid = omp_get_thread_num(); int numThreads = omp_get_num_threads(); // Determine bounds for this thread's scan operation: vtkm::Id chunkSize = (numValues + numThreads - 1) / numThreads; vtkm::Id scanIdx = std::min(tid * chunkSize, numValues); vtkm::Id scanEnd = std::min(scanIdx + chunkSize, numValues); auto threadKeysBegin = keysOut + scanIdx; auto threadValuesBegin = valuesOut + scanIdx; auto threadKey = threadKeysBegin; auto threadValue = threadValuesBegin; // Reduce each thread's partition: KeyType rangeKey; ValueType rangeValue; for (;;) { if (scanIdx < scanEnd) { rangeKey = keysIn[scanIdx]; rangeValue = valuesIn[scanIdx]; ++scanIdx; // Locate end of current range: while (scanIdx < scanEnd && static_cast<KeyType>(keysIn[scanIdx]) == rangeKey) { rangeValue = f(rangeValue, valuesIn[scanIdx]); ++scanIdx; } *threadKey = rangeKey; *threadValue = rangeValue; ++threadKey; ++threadValue; } else { break; } } if (tid == 0) { outIdx = static_cast<vtkm::Id>(threadKey - threadKeysBegin); } // Combine the reduction results. Skip tid == 0, since it's already in // the correct location: for (int i = 1; i < numThreads; ++i) { // This barrier ensures that: // 1) Threads remain synchronized through this final reduction loop. // 2) The outIdx variable is initialized by thread 0. // 3) All threads have reduced their partitions. 
VTKM_OPENMP_DIRECTIVE(barrier) if (tid == i) { // Check if the previous thread's last key matches our first: if (outIdx > 0 && threadKeysBegin < threadKey && keysOut[outIdx - 1] == *threadKeysBegin) { valuesOut[outIdx - 1] = f(valuesOut[outIdx - 1], *threadValuesBegin); ++threadKeysBegin; ++threadValuesBegin; } // Copy reduced partition to final location (if needed) if (threadKeysBegin < threadKey && threadKeysBegin != keysOut + outIdx) { std::copy(threadKeysBegin, threadKey, keysOut + outIdx); std::copy(threadValuesBegin, threadValue, valuesOut + outIdx); } outIdx += static_cast<vtkm::Id>(threadKey - threadKeysBegin); } // end tid == i } // end combine reduction } // end parallel token.DetachFromAll(); keysOutArray.Shrink(outIdx); valuesOutArray.Shrink(outIdx); } template <typename IterT, typename RawPredicateT> struct UniqueHelper { using ValueType = typename std::iterator_traits<IterT>::value_type; using PredicateT = internal::WrappedBinaryOperator<bool, RawPredicateT>; struct Node { vtkm::Id2 InputRange{ -1, -1 }; vtkm::Id2 OutputRange{ -1, -1 }; // Pad the node out to the size of a cache line to prevent false sharing: static constexpr size_t DataSize = 2 * sizeof(vtkm::Id2); static constexpr size_t NumCacheLines = CeilDivide<size_t>(DataSize, VTKM_CACHE_LINE_SIZE); static constexpr size_t PaddingSize = NumCacheLines * VTKM_CACHE_LINE_SIZE - DataSize; unsigned char Padding[PaddingSize]; }; IterT Data; vtkm::Id NumValues; PredicateT Predicate; vtkm::Id LeafSize; std::vector<Node> Nodes; size_t NextNode; UniqueHelper(IterT iter, vtkm::Id numValues, RawPredicateT pred) : Data(iter) , NumValues(numValues) , Predicate(pred) , LeafSize(0) , NextNode(0) { } vtkm::Id Execute() { vtkm::Id outSize = 0; VTKM_OPENMP_DIRECTIVE(parallel default(shared)) { VTKM_OPENMP_DIRECTIVE(single) { this->Prepare(); // Kick off task-based divide-and-conquer uniquification: Node* rootNode = this->AllocNode(); rootNode->InputRange = vtkm::Id2(0, this->NumValues); this->Uniquify(rootNode); 
outSize = rootNode->OutputRange[1] - rootNode->OutputRange[0]; } } return outSize; } private: void Prepare() { // Figure out how many values each thread should handle: int numThreads = omp_get_num_threads(); vtkm::Id chunksPerThread = 8; vtkm::Id numChunks; ComputeChunkSize( this->NumValues, numThreads, chunksPerThread, sizeof(ValueType), numChunks, this->LeafSize); // Compute an upper-bound of the number of nodes in the tree: std::size_t numNodes = static_cast<std::size_t>(numChunks); while (numChunks > 1) { numChunks = (numChunks + 1) / 2; numNodes += static_cast<std::size_t>(numChunks); } this->Nodes.resize(numNodes); this->NextNode = 0; } Node* AllocNode() { size_t nodeIdx; // GCC emits a false positive "value computed but not used" for this block: #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-value" VTKM_OPENMP_DIRECTIVE(atomic capture) { nodeIdx = this->NextNode; ++this->NextNode; } #pragma GCC diagnostic pop VTKM_ASSERT(nodeIdx < this->Nodes.size()); return &this->Nodes[nodeIdx]; } bool IsLeaf(const vtkm::Id2& range) { return (range[1] - range[0]) <= this->LeafSize; } // Not an strict midpoint, but ensures that the first range will always be // a multiple of the leaf size. vtkm::Id ComputeMidpoint(const vtkm::Id2& range) { const vtkm::Id n = range[1] - range[0]; const vtkm::Id np = this->LeafSize; return CeilDivide(n / 2, np) * np + range[0]; } void Uniquify(Node* node) { if (!this->IsLeaf(node->InputRange)) { vtkm::Id midpoint = this->ComputeMidpoint(node->InputRange); Node* right = this->AllocNode(); Node* left = this->AllocNode(); right->InputRange = vtkm::Id2(midpoint, node->InputRange[1]); // Intel compilers seem to have trouble following the 'this' pointer // when launching tasks, resulting in a corrupt task environment. // Explicitly copying the pointer into a local variable seems to fix this. 
auto explicitThis = this; VTKM_OPENMP_DIRECTIVE(taskgroup) { VTKM_OPENMP_DIRECTIVE(task) { explicitThis->Uniquify(right); } left->InputRange = vtkm::Id2(node->InputRange[0], midpoint); this->Uniquify(left); } // end taskgroup. Both sides of the tree will be completed here. // Combine the ranges in the left side: if (this->Predicate(this->Data[left->OutputRange[1] - 1], this->Data[right->OutputRange[0]])) { ++right->OutputRange[0]; } vtkm::Id numVals = right->OutputRange[1] - right->OutputRange[0]; DoCopy(this->Data + right->OutputRange[0], this->Data + left->OutputRange[1], numVals); node->OutputRange[0] = left->OutputRange[0]; node->OutputRange[1] = left->OutputRange[1] + numVals; } else { auto start = this->Data + node->InputRange[0]; auto end = this->Data + node->InputRange[1]; end = std::unique(start, end, this->Predicate); node->OutputRange[0] = node->InputRange[0]; node->OutputRange[1] = node->InputRange[0] + static_cast<vtkm::Id>(end - start); } } }; } } } // end namespace vtkm::cont::openmp #endif // vtk_m_cont_openmp_internal_FunctorsOpenMP_h
Allocators.h
#pragma once #include <cstring> #include <vector> #include <omp.h> namespace pfc { // NUMA allocator for ScalarField // Based upon ideas by Georg Hager and Gerhard Wellein template <class Data> class NUMA_Allocator { public: using value_type = Data; using propagate_on_container_move_assignment = true_type; using is_always_equal = true_type; constexpr NUMA_Allocator() noexcept {} constexpr NUMA_Allocator(const NUMA_Allocator&) noexcept = default; template<class OtherData> constexpr NUMA_Allocator(const NUMA_Allocator<OtherData>&) noexcept {} value_type * allocate(const size_t num) { const size_t size_vt = sizeof(value_type); const size_t len = num * size_vt; const size_t num_threads = omp_get_max_threads(); if (num_threads > num) { char * p = reinterpret_cast<char*>(std::malloc(len)); std::memset(p, 0, len); return reinterpret_cast<value_type*>(p); } const size_t block_size = (num / num_threads) * size_vt; const size_t block_size_rem = len - block_size * (num_threads - 1); char * p = reinterpret_cast<char*>(std::malloc(len)); #pragma omp parallel for for (int thr = 0; thr < num_threads; thr++) { const size_t cur_block_size = thr == num_threads - 1 ? block_size_rem : block_size; std::memset(p + thr * block_size, 0, cur_block_size); } return reinterpret_cast<value_type*>(p); } void deallocate(value_type * const p, const size_t num) { std::free(p); } friend int operator==(const NUMA_Allocator& a1, const NUMA_Allocator& a2) { return true; } friend int operator!=(const NUMA_Allocator& a1, const NUMA_Allocator& a2) { return false; } }; }
GB_unaryop__lnot_fp64_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_fp64_int32
// op(A') function: GB_tran__lnot_fp64_int32

// C type: double
// A type: int32_t
// cast: double cij = (double) aij
// unaryop: cij = !(aij != 0)

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
// Expands to a block that reads aij = Ax [pA], casts it to double, and
// stores the logical-not result in Cx [pC].
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx: output array of size anz (double)
// Ax: input array of size anz (int32_t)
// anz: number of entries
// nthreads: number of OpenMP threads to use
// Returns GrB_SUCCESS, or GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB_unop__lnot_fp64_int32
(
    double *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Each iteration writes only Cx [p]; iterations are independent, so the
    // static schedule gives a race-free, load-balanced parallel loop.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The function body is generated by textually including the shared
// transpose template, which is specialized via the GB_* macros above.
GrB_Info GB_tran__lnot_fp64_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ExampleClusterer_Shared.h
/** * grove: ExampleClusterer_Shared.h * Copyright (c) Torr Vision Group, University of Oxford, 2017. All rights reserved. */ #ifndef H_GROVE_EXAMPLECLUSTERER_SHARED #define H_GROVE_EXAMPLECLUSTERER_SHARED #include <ORUtils/MathUtils.h> #include <ORUtils/PlatformIndependence.h> #include "../../util/Array.h" namespace grove { /** * \brief Computes the final cluster index for the specified example by following the parent links computed in compute_parent. * * \note The compute_parent function split all the examples into subtrees and allocated a cluster index to the root * of each subtree. With this function, we navigate each example's subtree until we find the root and then copy * the cluster index across to the example. We also compute the size of each cluster. * * \param exampleSetIdx The index of the example set containing the example. * \param exampleIdx The index of the example within its example set. * \param exampleSetCapacity The maximum size of each example set. * \param parents An image containing the parent indices for the examples. * \param clusterIndices An image containing the cluster indices for the examples. * \param clusterSizes An array in which to keep track of the size of each cluster. Must contain zeros * at the point at which the function is called. */ _CPU_AND_GPU_CODE_ inline void compute_cluster_index(int exampleSetIdx, int exampleIdx, int exampleSetCapacity, const int *parents, int *clusterIndices, int *clusterSizes) { // Compute the linear offset to the beginning of the data associated with the specified example set. const int exampleSetOffset = exampleSetIdx * exampleSetCapacity; // Compute the raster offset of the specified example in the example sets image. const int exampleOffset = exampleSetOffset + exampleIdx; // Follow the parent links from the specified example up to the root of its subtree. // Note that there is no need to check if the example is valid, since compute_parent // set the parents of invalid examples to themselves. 
int currentIdx = exampleIdx; int parentIdx = parents[exampleOffset]; while(parentIdx != currentIdx) { currentIdx = parentIdx; parentIdx = parents[exampleSetOffset + parentIdx]; } // Get the cluster index of the subtree root and assign it to this example. const int clusterIdx = clusterIndices[exampleSetOffset + parentIdx]; clusterIndices[exampleOffset] = clusterIdx; // If the cluster is valid then atomically increase its size (it might be invalid if we started from an invalid example). if(clusterIdx >= 0) { // Note: The __CUDA_ARCH__ check is needed because this function is not a template. #if defined(__CUDACC__) && defined(__CUDA_ARCH__) atomicAdd(&clusterSizes[exampleSetOffset + clusterIdx], 1); #else #ifdef WITH_OPENMP #pragma omp atomic #endif clusterSizes[exampleSetOffset + clusterIdx]++; #endif } } /** * \brief Compute the density of examples around an individual example in one of the example sets. * * \param exampleSetIdx The index of the example set containing the example. * \param exampleIdx The index of the example within its example set. * \param exampleSets An image containing the sets of examples to be clustered (one set per row). The width of * the image specifies the maximum number of examples that can be contained in each set. * \param exampleSetSizes The number of valid examples in each example set. * \param exampleSetCapacity The maximum size of each example set. * \param sigma The sigma of the Gaussian used when computing the example density. * \param densities The memory in which to store the density of each example (one example set per row, * one density value per column). */ template <typename ExampleType> _CPU_AND_GPU_CODE_TEMPLATE_ inline void compute_density(int exampleSetIdx, int exampleIdx, const ExampleType *exampleSets, const int *exampleSetSizes, int exampleSetCapacity, float sigma, float *densities) { // Compute the linear offset to the beginning of the data associated with the specified example set. 
const int exampleSetOffset = exampleSetIdx * exampleSetCapacity; // Compute the raster offset of the specified example in the example sets image. const int exampleOffset = exampleSetOffset + exampleIdx; // Look up the size of the specified example set. const int exampleSetSize = exampleSetSizes[exampleSetIdx]; float density = 0.0f; // If the example is valid, loop over all of the examples in its set and // compute the density based on the examples that are within 3 * sigma of it // (points further away would only make a small contribution to the density). if(exampleIdx < exampleSetSize) { const float threeSigmaSq = (3.0f * sigma) * (3.0f * sigma); const float minusOneOverTwoSigmaSq = -1.0f / (2.0f * sigma * sigma); const ExampleType centreExample = exampleSets[exampleOffset]; for(int i = 0; i < exampleSetSize; ++i) { const ExampleType otherExample = exampleSets[exampleSetOffset + i]; // Note: ExampleType must have a distance_squared function defined for it. const float normSq = distance_squared(centreExample, otherExample); if(normSq < threeSigmaSq) { density += expf(normSq * minusOneOverTwoSigmaSq); } } } densities[exampleOffset] = density; } /** * \brief Computes the parent and initial cluster indices to assign to the specified example as part of the neighbour-linking step * of the really quick shift (RQS) algorithm. * * \note Each example becomes part of a subtree in which each example has as its parent the closest example with higher density. * For details about RQS, see the paper by Fulkerson and Soatto: http://vision.ucla.edu/~brian/papers/fulkerson10really.pdf * * \param exampleSetIdx The index of the example set containing the example. * \param exampleIdx The index of the example within its example set. * \param exampleSets An image containing the sets of examples to be clustered (one set per row). The width of * the image specifies the maximum number of examples that can be contained in each set. 
* \param exampleSetCapacity The maximum number of examples in an example set. * \param exampleSetSizes The number of valid examples in each example set. * \param densities An image containing the density of each example (one set per row, one density value per column). * \param tauSq The square of the maximum distance allowed between examples if they are to be linked. * \param parents An image in which to store a parent index for each example. This will generally be the index of * another example in the same set, but may be that of the example itself if it is a subtree root. * \param clusterIndices An image in which to store an initial cluster index for each example. The initial cluster index * for an example will be set to -1 unless the example is a subtree root. * \param nbClustersPerExampleSet An array in which to keep track of the number of clusters in each example set. Must contain zeros * at the point at which the function is called. */ template <typename ExampleType> _CPU_AND_GPU_CODE_TEMPLATE_ inline void compute_parent(int exampleSetIdx, int exampleIdx, const ExampleType *exampleSets, int exampleSetCapacity, const int *exampleSetSizes, const float *densities, float tauSq, int *parents, int *clusterIndices, int *nbClustersPerExampleSet) { // Compute the linear offset to the beginning of the data associated with the specified example set. const int exampleSetOffset = exampleSetIdx * exampleSetCapacity; // Compute the raster offset of the specified example in the example sets image. const int exampleOffset = exampleSetOffset + exampleIdx; // Look up the size of the specified example set. const int exampleSetSize = exampleSetSizes[exampleSetIdx]; // Unless it becomes part of a subtree, each example starts as its own parent. int parentIdx = exampleIdx; // The index of the cluster associated with the specified example (-1 except for subtree roots). 
int clusterIdx = -1; // If the specified example is valid: if(exampleIdx < exampleSetSize) { // Read in the example and its density from global memory. const ExampleType centreExample = exampleSets[exampleOffset]; const float centreDensity = densities[exampleOffset]; // We are only interested in examples whose distance to the specified example is less than tau. float minDistanceSq = tauSq; // For each other example in the specified example's set: for(int i = 0; i < exampleSetSize; ++i) { if(i == exampleIdx) continue; // Read in the other example and its density from global memory. const ExampleType otherExample = exampleSets[exampleSetOffset + i]; const float otherDensity = densities[exampleSetOffset + i]; // Compute the squared distance between the specified example and the other example. // Note: ExampleType must have a distance_squared function defined for it. const float otherDistSq = distance_squared(centreExample, otherExample); // We are looking for the closest example with a higher density (doesn't matter by how much) than that of the specified example. if(otherDensity > centreDensity && otherDistSq < minDistanceSq) { minDistanceSq = otherDistSq; parentIdx = i; } } // If the specified example is still its own parent (i.e. it is a subtree root), we didn't find any close // example with a higher density, so grab a unique cluster index for the example. if(parentIdx == exampleIdx) { #ifdef __CUDACC__ clusterIdx = atomicAdd(&nbClustersPerExampleSet[exampleSetIdx], 1); #else #ifdef WITH_OPENMP3 #pragma omp atomic capture #elif WITH_OPENMP #pragma omp critical #endif clusterIdx = nbClustersPerExampleSet[exampleSetIdx]++; #endif } } // Write the parent of the specified example to global memory. parents[exampleOffset] = parentIdx; // Write the cluster index associated with the example to global memory. (This will be -1 unless the example is a subtree root.) 
clusterIndices[exampleOffset] = clusterIdx; } /** * \brief Computes the parameters for and stores the specified selected cluster for the specified example set. * * \note The examples in the set must already have been grouped into clusters by the other functions in this file, * and suitable clusters must have been selected for creation. * \note The actual cluster parameters depend on the ClusterType; for this reason, cluster creation is delegated to a function * called create_cluster_from_examples, which must be defined (elsewhere) for the relevant ExampleType and ClusterType pair. * * \param exampleSetIdx The index of the example set for which the selected cluster is being created. * \param selectedClusterIdx The index of the selected cluster to create (this is an index into the selected clusters array). * \param exampleSets An image containing the sets of examples that have been clustered (one set per row). The width * of the image specifies the maximum number of examples that can be contained in each set. * \param exampleSetSizes The number of valid examples in each example set. * \param exampleSetCapacity The maximum size of each example set. * \param clusterIndices An image containing the cluster indices for the examples. * \param selectedClusters The indices of the clusters selected for each example set. * \param maxSelectedClusters The maximum number of clusters to extract from each example set. * \param clusterContainers The output containers in which to store the clusters created for each example set. 
*/ template <typename ExampleType, typename ClusterType, int MaxClusters> _CPU_AND_GPU_CODE_TEMPLATE_ inline void create_selected_cluster(int exampleSetIdx, int selectedClusterIdx, const ExampleType *exampleSets, const int *exampleSetSizes, int exampleSetCapacity, const int *clusterIndices, const int *selectedClusters, int maxSelectedClusters, Array<ClusterType,MaxClusters> *clusterContainers) { // Compute the linear offset to the beginning of the data associated with the selected clusters for the specified example set. const int selectedClustersOffset = exampleSetIdx * maxSelectedClusters; // Look up the real cluster index of the specified selected cluster (e.g. if this is the second selected // cluster for the example set, and selectedClusters[1] = 23, then the real cluster index is 23). const int clusterIdx = selectedClusters[selectedClustersOffset + selectedClusterIdx]; // If the specified selected cluster is valid: if(clusterIdx >= 0) { // Compute the linear offset to the beginning of the data associated with the specified example set // in the example sets and cluster indices images. const int exampleSetOffset = exampleSetIdx * exampleSetCapacity; // Get a reference to the output clusters array for the specified example set. Array<ClusterType,MaxClusters>& outputClusters = clusterContainers[exampleSetIdx]; // Compute the index in the output clusters array at which to store the selected cluster once it has been created. int outputClusterIdx = -1; #ifdef __CUDACC__ outputClusterIdx = atomicAdd(&outputClusters.size, 1); #else #ifdef WITH_OPENMP3 #pragma omp atomic capture #elif WITH_OPENMP #pragma omp critical #endif outputClusterIdx = outputClusters.size++; #endif // Create the cluster and write it into the output clusters array at the specified location. 
create_cluster_from_examples( clusterIdx, exampleSets + exampleSetOffset, clusterIndices + exampleSetOffset, exampleSetSizes[exampleSetIdx], outputClusters.elts[outputClusterIdx] ); } } /** * \brief Resets a cluster container. * * \param exampleSetIdx The index of the example set whose cluster container we want to reset. * \param clusterContainers A pointer to the cluster containers. */ template <typename ClusterType, int MaxClusters> _CPU_AND_GPU_CODE_TEMPLATE_ inline void reset_cluster_container(int exampleSetIdx, Array<ClusterType,MaxClusters> *clusterContainers) { // It is sufficient to just reset the size of the container (i.e. the number of clusters) to zero. // There is no need to modify the actual clusters, since they will be overwritten later. clusterContainers[exampleSetIdx].size = 0; } /** * \brief Resets the temporary variables associated with the specified example set. * * \param exampleSetIdx The index of the example set for which we are resetting the temporary variables. * \param exampleSetCapacity The maximum size of each example set. * \param nbClustersPerExampleSet The number of clusters extracted from each example set. * \param clusterSizes An image containing the sizes of the extracted clusters (for all example sets). * \param clusterSizeHistograms The histograms of cluster sizes for the different example sets. */ _CPU_AND_GPU_CODE_ inline void reset_temporaries_for_set(int exampleSetIdx, int exampleSetCapacity, int *nbClustersPerExampleSet, int *clusterSizes, int *clusterSizeHistograms) { // Reset the number of clusters extracted from the specified example set to zero. nbClustersPerExampleSet[exampleSetIdx] = 0; // Compute the linear offset to the beginning of the data associated with the specified example set. const int exampleSetOffset = exampleSetIdx * exampleSetCapacity; // The example set's histogram has an additional value that needs to be reset, so we need a separate offset. 
const int histogramOffset = exampleSetIdx * (exampleSetCapacity + 1); // Reset the cluster sizes and histogram values associated with the specified example set. for(int i = 0; i < exampleSetCapacity; ++i) { clusterSizes[exampleSetOffset + i] = 0; clusterSizeHistograms[histogramOffset + i] = 0; } // We also need to reset the last element of the histogram. clusterSizeHistograms[histogramOffset + exampleSetCapacity] = 0; } /** * \brief Selects the largest clusters for the specified example set and writes their indices into the selected clusters image. * * \param exampleSetIdx The index of the example set for which to select clusters. * \param clusterSizes An image containing the sizes of the extracted clusters (for all example sets). * \param clusterSizeHistograms The histograms of cluster sizes for the different example sets. * \param nbClustersPerExampleSet The number of clusters extracted from each example set. * \param exampleSetCapacity The mamimum size of each example set. * \param maxSelectedClusters The maximum number of clusters to keep for each example set. * \param minClusterSize The minimum size of cluster to keep. * \param selectedClusters An image in which to store the indices of the clusters selected for each example set. */ _CPU_AND_GPU_CODE_ inline void select_clusters_for_set(int exampleSetIdx, const int *clusterSizes, const int *clusterSizeHistograms, const int *nbClustersPerExampleSet, int exampleSetCapacity, int maxSelectedClusters, int minClusterSize, int *selectedClusters) { // Compute the linear offset to the beginning of the data associated with the specified example set. const int exampleSetOffset = exampleSetIdx * exampleSetCapacity; // Compute the linear offset to the beginning of the data associated with the specified example set's histogram. const int histogramOffset = exampleSetIdx * (exampleSetCapacity + 1); // Look up the number of valid clusters associated with the specified example set. 
const int nbValidClusters = nbClustersPerExampleSet[exampleSetIdx]; // Compute the linear offset to the beginning of the data associated with the selected clusters for the specified example set. const int selectedClustersOffset = exampleSetIdx * maxSelectedClusters; // Reset the selected clusters for the specified example set (by setting all selected cluster indices to an invalid value). for(int i = 0; i < maxSelectedClusters; ++i) { selectedClusters[selectedClustersOffset + i] = -1; } // Starting from the largest clusters, scan downwards in the histogram to find the minimum size of cluster // we need to consider in order to try to select maxSelectedClusters clusters. Note that we will not be // able to select maxSelectedClusters if there are fewer suitable clusters than that to start with; if // that happens, we simply keep all of the suitable clusters we do have. int nbSelectedClusters = 0; int nbSmallestClustersToKeep = 0; int minSelectedClusterSize = exampleSetCapacity; while(minSelectedClusterSize > minClusterSize && nbSelectedClusters < maxSelectedClusters) { --minSelectedClusterSize; nbSmallestClustersToKeep = MIN(clusterSizeHistograms[histogramOffset + minSelectedClusterSize], maxSelectedClusters - nbSelectedClusters); nbSelectedClusters += nbSmallestClustersToKeep; } // If we couldn't find any suitable clusters at all, early out. if(nbSelectedClusters == 0) return; // Now walk through all of the clusters we do have, selecting (a) all of those whose size is strictly greater // than the minimum selected cluster size, and (b) as many as necessary of those whose size is exactly equal // to the minimum selected cluster size. 
nbSelectedClusters = 0; int nbSmallestClustersKept = 0; for(int clusterIdx = 0; clusterIdx < nbValidClusters; ++clusterIdx) { const int clusterSize = clusterSizes[exampleSetOffset + clusterIdx]; if(clusterSize > minSelectedClusterSize || (clusterSize == minSelectedClusterSize && nbSmallestClustersKept++ < nbSmallestClustersToKeep)) { selectedClusters[selectedClustersOffset + nbSelectedClusters++] = clusterIdx; } } // Sort the selected clusters in non-increasing order of size using a simple selection sort. // Note: Selection sort is quadratic, but the number of clusters is small enough for now that it doesn't matter. for(int i = 0; i < nbSelectedClusters; ++i) { // Find a cluster with maximum size in selectedClusters[i..nbSelectedClusters). int maxSize = clusterSizes[exampleSetOffset + selectedClusters[selectedClustersOffset + i]]; int maxIdx = i; for(int j = i + 1; j < nbSelectedClusters; ++j) { int size = clusterSizes[exampleSetOffset + selectedClusters[selectedClustersOffset + j]]; if(size > maxSize) { maxSize = size; maxIdx = j; } } // If selectedClusters[i] wasn't the maximal cluster, swap it with the cluster that was. if(maxIdx != i) { int temp = selectedClusters[selectedClustersOffset + i]; selectedClusters[selectedClustersOffset + i] = selectedClusters[selectedClustersOffset + maxIdx]; selectedClusters[selectedClustersOffset + maxIdx] = temp; } } } /** * \brief Updates the cluster size histogram for the specified example set based on the size of the specified cluster. * * \note The cluster size histograms will be used later to select the largest clusters in each example set. * * \param exampleSetIdx The index of the example set whose histogram should be updated. * \param clusterIdx The index of the cluster whose size should be used to update the histogram. * \param clusterSizes An image containing the sizes of the extracted clusters (for all example sets). 
* \param clusterSizeHistograms An image storing a cluster size histogram for each example set under consideration (one histogram per row). * \param exampleSetCapacity The maximum number of elements in each example set. */ _CPU_AND_GPU_CODE_ inline void update_cluster_size_histogram(int exampleSetIdx, int clusterIdx, const int *clusterSizes, int *clusterSizeHistograms, int exampleSetCapacity) { // Compute the linear offset to the beginning of the data associated with the specified example set. const int exampleSetOffset = exampleSetIdx * exampleSetCapacity; // Compute the linear offset to the beginning of the data associated with the specified example set's histogram. const int histogramOffset = exampleSetIdx * (exampleSetCapacity + 1); // Look up the size of the specified cluster. const int clusterSize = clusterSizes[exampleSetOffset + clusterIdx]; // Atomically increment the corresponding bin in the histogram. // Note: The __CUDA_ARCH__ check is needed because this function is not a template. #if defined(__CUDACC__) && defined(__CUDA_ARCH__) atomicAdd(&clusterSizeHistograms[histogramOffset + clusterSize], 1); #else #ifdef WITH_OPENMP #pragma omp atomic #endif clusterSizeHistograms[histogramOffset + clusterSize]++; #endif } } #endif
dualpivot_tasks_adaptive.h
#ifndef QUICKSORTMP_DUALPIVOT_TASKS_ADAPTIVE_H
#define QUICKSORTMP_DUALPIVOT_TASKS_ADAPTIVE_H

#include <omp.h>
#include <stddef.h>

#include "partition.h"
#include "order.h"
#include "insertion_sort.h"

/*
 * Adaptive dual-pivot quicksort worker on the inclusive index range
 * [lower_index, higher_index] of a generic array with elements of `size`
 * bytes.  After dual-pivot partitioning, each of the three resulting
 * segments is either sorted recursively in its own OpenMP task (when it is
 * longer than INSERTION_SORT_THRESHOLD) or finished with insertion sort.
 *
 * NOTE(review): INSERTION_SORT_THRESHOLD and partition() are expected to
 * come from insertion_sort.h / partition.h — confirm there.
 *
 * Each insertion-sort call below deliberately spans `insertion_length + 1`
 * elements, i.e. it includes the adjacent pivot element; the pivot is
 * already in its final position, so including it is harmless.
 */
static void sort_parallel_adaptive(
        void* restrict array,
        ptrdiff_t lower_index,
        ptrdiff_t higher_index,
        size_t size,
        enum Order compare(const void*, const void*)
)
{
    if (lower_index < higher_index)
    {
        size_t insertion_length = 0;
        // Whole range small enough: no partitioning, insertion-sort it all.
        if ((insertion_length = higher_index - lower_index) < INSERTION_SORT_THRESHOLD)
        {
            register unsigned char* arr_ptr = array;
            insertion_sort(arr_ptr + size * lower_index, insertion_length + 1, size, compare);
        }
        else
        {
            // Dual-pivot partition: afterwards the pivots sit at left_pivot
            // and right_pivot, splitting the range into three segments.
            ptrdiff_t left_pivot = 0;
            ptrdiff_t right_pivot = 0;
            partition(array, lower_index, higher_index, &left_pivot, &right_pivot, size, compare);

            // Segment 1: [lower_index, left_pivot - 1]
            if ((insertion_length = left_pivot - lower_index) > INSERTION_SORT_THRESHOLD)
            {
#pragma omp task default(none) firstprivate(array, lower_index, left_pivot, size, compare)
                {
                    sort_parallel_adaptive(array, lower_index, left_pivot - 1, size, compare);
                }
            }
            else
            {
                register unsigned char* arr_ptr = array;
                insertion_sort(arr_ptr + size * lower_index, insertion_length + 1, size, compare);
            }
            // Segment 2: [left_pivot + 1, right_pivot - 1]
            if ((insertion_length = right_pivot - left_pivot) > INSERTION_SORT_THRESHOLD)
            {
#pragma omp task default(none) firstprivate(array, left_pivot, right_pivot, size, compare)
                {
                    sort_parallel_adaptive(array, left_pivot + 1, right_pivot - 1, size, compare);
                }
            }
            else
            {
                register unsigned char* arr_ptr = array;
                insertion_sort(arr_ptr + size * left_pivot, insertion_length + 1, size, compare);
            }
            // Segment 3: [right_pivot + 1, higher_index]
            if ((insertion_length = higher_index - right_pivot) > INSERTION_SORT_THRESHOLD)
            {
#pragma omp task default(none) firstprivate(array, right_pivot, higher_index, size, compare)
                {
                    sort_parallel_adaptive(array, right_pivot + 1, higher_index, size, compare);
                }
            }
            else
            {
                register unsigned char* arr_ptr = array;
                insertion_sort(arr_ptr + size * right_pivot, insertion_length + 1, size, compare);
            }
        }
    }
}

/*
 * Public entry point: sorts `length` elements of `size` bytes using the
 * task-parallel adaptive dual-pivot quicksort above, or plain insertion
 * sort for arrays of 10000 elements or fewer (parallel overhead would
 * dominate there).
 *
 * The single worker thread seeds the recursion; the tasks it spawns are
 * executed by the whole team, and the implicit barrier at the end of the
 * parallel region guarantees all tasks have completed before returning.
 */
void dual_pivot_quicksort_tasks_adaptive(
        void* restrict array,
        size_t length,
        size_t size,
        enum Order compare(const void*, const void*)
)
{
    if (length > 10000){
#pragma omp parallel default(none) shared(array, length, size, compare)
        {
#pragma omp single nowait
            {
                sort_parallel_adaptive(array, 0, length - 1, size, compare);
            }
        }
    }
    else
    {
        insertion_sort(array, length, size, compare);
    }
}

#endif //QUICKSORTMP_DUALPIVOT_TASKS_ADAPTIVE_H
hybrid_whereami.c
/* Program hybrid_whereami reports the mask for each OMP thread for each MPI process, and works for nsec seconds (10). This allows one to inspect occupation through utilities like top (e.g. execute top, then hit the 1 key). Uses maskeraid utilities github.com/TACC/maskeraid mpi_report_mask(): in pure MPI region to report MPI process masks hybrid_report_mask(): in OpenMP parallel region to report thread masks map_to_cpuid( cpuid ): sets thread affinity to cpu_id (see /proc/cpuinfo, or hwloc) load_cpu_nsec(nsec): loads the cpu for nsec (default 10) hybrid_whereami.c is a driver for: 1.) Get line arguments (optional): help or number of seconds for load 2.) Start MPI Affinity for MPI processes can be reset here. mpi_report_mask() reports MPI process masks 3.) Start OpenMP parallel region hybrid_report_mask() reports masks for each thread of each MPI process. 4.) Set a work load on each thread 5.) Finish parallel region 6.) Stop MPI Kent Milfeld 12/16/15 Update to separate require a single call for OpenMP hybrid. Uses multi-threaded MPI initialization Kent Milfeld 2015/07/13 */ #include <stdio.h> #include <omp.h> #include <mpi.h> #include <unistd.h> #include <stdlib.h> void load_cpu_nsec(int nsec); void hybrid_report_mask(void); int map_to_cpuid( int icore); int main(int argc, char **argv){ int rank, nranks; // MPI variables. 
int nthrds, thrd, cpuid; //Thread info int requested=MPI_THREAD_MULTIPLE, provided; int nsec = 10; // Load, default time int ierr; // Error number cmdln_get_nsec_or_help( &nsec, argc, argv); //optional, get nsec from cmd line // thread safe init replaces MPI_Init(&argc, &argv); MPI_Init_thread(&argc, &argv, requested, &provided); MPI_Comm_size(MPI_COMM_WORLD, &nranks); MPI_Comm_rank(MPI_COMM_WORLD, &rank); mpi_report_mask(); // Report JUST MPI process masks #pragma omp parallel private(thrd,nthrds,ierr) { thrd = omp_get_thread_num(); nthrds = omp_get_num_threads(); // cpuid = thrd; // set cpuid to thread number (thrd) // ierr = map_to_cpuid( cpuid ); // set your own affinity here hybrid_report_mask(); // Call mask reporter load_cpu_nsec( nsec ); // Load up rank process so user can watch top. } MPI_Finalize(); }
/* ===== enhance.c ===== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE N N H H AAA N N CCCC EEEEE % % E NN N H H A A NN N C E % % EEE N N N HHHHH AAAAA N N N C EEE % % E N NN H H A A N NN C E % % EEEEE N N H H A A N N CCCC EEEEE % % % % % % MagickCore Image Enhancement Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoGammaImage() extract the 'mean' from the image and adjust the image % to try make set its gamma appropriatally. 
%
%  The format of the AutoGammaImage method is:
%
%      MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-level
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image,
  ExceptionInfo *exception)
{
  double
    gamma,
    log_mean,
    mean,
    sans;

  MagickStatusType
    status;

  register ssize_t
    i;

  log_mean=log(0.5);
  if (image->channel_mask == DefaultChannels)
    {
      /*
        Apply gamma correction equally across all given channels.
      */
      (void) GetImageMean(image,&mean,&sans,exception);
      gamma=log(mean*QuantumScale)/log_mean;
      return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception));
    }
  /*
    Auto-gamma each channel separately.
  */
  status=MagickTrue;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    ChannelType
      channel_mask;

    PixelChannel channel=GetPixelChannelChannel(image,i);
    PixelTrait traits=GetPixelChannelTraits(image,channel);
    if ((traits & UpdatePixelTrait) == 0)
      continue;
    /*
      1UL avoids shifting a plain int once the channel index grows.
    */
    channel_mask=SetImageChannelMask(image,(ChannelType) (1UL << i));
    /*
      Accumulate per-channel success rather than overwrite it: the previous
      code assigned status=GetImageMean(...), which silently discarded a
      failure recorded for an earlier channel.
    */
    status&=GetImageMean(image,&mean,&sans,exception);
    gamma=log(mean*QuantumScale)/log_mean;
    status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception);
    (void) SetImageChannelMask(image,channel_mask);
    if (status == MagickFalse)
      break;
  }
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o L e v e l I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoLevelImage() adjusts the levels of a particular image channel by
%  scaling the minimum and maximum values to the full quantum range.
%
%  The format of the AutoLevelImage method is:
%
%      MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-level
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image,
  ExceptionInfo *exception)
{
  /* Stretch channel minimum/maximum to the full range: black point 0.0,
     white point 0.0 (i.e. no clipping), gamma 1.0 (unchanged). */
  return(MinMaxStretchImage(image,0.0,0.0,1.0,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B r i g h t n e s s C o n t r a s t I m a g e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BrightnessContrastImage() changes the brightness and/or contrast of an
%  image.  It converts the brightness and contrast parameters into slope and
%  intercept and calls a polynomical function to apply to the image.
%
%  The format of the BrightnessContrastImage method is:
%
%      MagickBooleanType BrightnessContrastImage(Image *image,
%        const double brightness,const double contrast,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o brightness: the brightness percent (-100 .. 100).
%
%    o contrast: the contrast percent (-100 .. 100).
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast,ExceptionInfo *exception)
{
/* NOTE(review): historic misspelling ("Contast") kept as-is; this tag is
   not referenced within this function. */
#define BrightnessContastImageTag  "BrightnessContast/Image"

  double
    alpha,
    coefficients[2],
    intercept,
    slope;

  MagickBooleanType
    status;

  /*
    Compute slope and intercept: contrast -100..100 maps through tan() to a
    slope in [0, +inf); brightness shifts the intercept.  The pair is then
    applied as a linear polynomial v' = slope*v + intercept.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  alpha=contrast;
  slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  /* Delegate the per-pixel work (and any parallelism) to FunctionImage(). */
  status=FunctionImage(image,PolynomialFunction,2,coefficients,exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l u t I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClutImage() replaces each color value in the given image, by using it as an
%  index to lookup a replacement color value in a Color Look UP Table in the
%  form of an image.  The values are extracted along a diagonal of the CLUT
%  image so either a horizontal or vertial gradient image can be used.
%
%  Typically this is used to either re-color a gray-scale image according to a
%  color gradient in the CLUT image, or to perform a freeform histogram
%  (level) adjustment according to the (typically gray-scale) gradient in the
%  CLUT image.
%
%  When the 'channel' mask includes the matte/alpha transparency channel but
%  one image has no such channel it is assumed that that image is a simple
%  gray-scale image that will effect the alpha channel values, either for
%  gray-scale coloring (with transparent or semi-transparent colors), or
%  a histogram adjustment of existing alpha channel values.  If both images
%  have matte channels, direct and normal indexing is applied, which is rarely
%  used.
%
%  The format of the ClutImage method is:
%
%      MagickBooleanType ClutImage(Image *image,Image *clut_image,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image, which is replaced by indexed CLUT values
%
%    o clut_image: the color lookup table image for replacement color values.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ClutImageTag  "Clut/Image"

  CacheView
    *clut_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    *clut_map;

  register ssize_t
    i;

  ssize_t
    adjust,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickCoreSignature);
  if( SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* A color CLUT applied to a gray image forces the image to sRGB first. */
  if( (IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsGrayColorspace(clut_image->colorspace) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map));
  if (clut_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image: pre-sample the CLUT along its diagonal into a MaxMap+1
    entry lookup table, so the per-pixel loop is a plain array index.
  */
  status=MagickTrue;
  progress=0;
  /* Integer interpolation samples cell centers; other methods need the
     half-open range, hence the -1 adjustment of columns/rows. */
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
  clut_view=AcquireVirtualCacheView(clut_image,exception);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    GetPixelInfo(clut_image,clut_map+i);
    (void) InterpolatePixelInfo(clut_image,clut_view,method,
      (double) i*(clut_image->columns-adjust)/MaxMap,(double) i*
      (clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
  }
  clut_view=DestroyCacheView(clut_view);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelTrait
        traits;

      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      /* Only channels carrying UpdatePixelTrait are remapped. */
      GetPixelInfoPixel(image,q,&pixel);
      traits=GetPixelChannelTraits(image,RedPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.red))].red;
      traits=GetPixelChannelTraits(image,GreenPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.green))].green;
      traits=GetPixelChannelTraits(image,BluePixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.blue))].blue;
      traits=GetPixelChannelTraits(image,BlackPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.black))].black;
      traits=GetPixelChannelTraits(image,AlphaPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.alpha))].alpha;
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClutImage)
#endif
        proceed=SetImageProgress(image,ClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map);
  /* If the CLUT carries alpha and alpha is being updated, make sure the
     target image's alpha channel is active. */
  if ((clut_image->alpha_trait != UndefinedPixelTrait) &&
      ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r D e c i s i o n L i s t I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorDecisionListImage() accepts a lightweight Color Correction Collection
%  (CCC) file which solely contains one or more color corrections and applies
%  the correction to the image.  Here is a sample CCC file:
%
%    <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
%          <ColorCorrection id="cc03345">
%                <SOPNode>
%                     <Slope> 0.9 1.2 0.5 </Slope>
%                     <Offset> 0.4 -0.5 0.6 </Offset>
%                     <Power> 1.0 0.8 1.5 </Power>
%                </SOPNode>
%                <SATNode>
%                     <Saturation> 0.85 </Saturation>
%                </SATNode>
%          </ColorCorrection>
%    </ColorCorrectionCollection>
%
%  which includes the slop, offset, and power for each of the RGB channels
%  as well as the saturation.
%
%  The format of the ColorDecisionListImage method is:
%
%      MagickBooleanType ColorDecisionListImage(Image *image,
%        const char *color_correction_collection,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color_correction_collection: the color correction collection in XML.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType ColorDecisionListImage(Image *image, const char *color_correction_collection,ExceptionInfo *exception) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView *image_view; char token[MagickPathExtent]; ColorCorrection color_correction; const char *content, *p; MagickBooleanType status; MagickOffsetType progress; PixelInfo *cdl_map; register ssize_t i; ssize_t y; XMLTreeInfo *cc, *ccc, *sat, *sop; /* Allocate and initialize cdl maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (color_correction_collection == (const char *) NULL) return(MagickFalse); ccc=NewXMLTree((const char *) color_correction_collection,exception); if (ccc == (XMLTreeInfo *) NULL) return(MagickFalse); cc=GetXMLTreeChild(ccc,"ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc=DestroyXMLTree(ccc); return(MagickFalse); } color_correction.red.slope=1.0; color_correction.red.offset=0.0; color_correction.red.power=1.0; color_correction.green.slope=1.0; color_correction.green.offset=0.0; color_correction.green.power=1.0; color_correction.blue.slope=1.0; color_correction.blue.offset=0.0; color_correction.blue.power=1.0; color_correction.saturation=0.0; sop=GetXMLTreeChild(cc,"SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo *offset, *power, *slope; slope=GetXMLTreeChild(sop,"Slope"); if (slope != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(slope); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.slope=StringToDouble(token,(char **) NULL); 
break; } case 1: { color_correction.green.slope=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.slope=StringToDouble(token, (char **) NULL); break; } } } } offset=GetXMLTreeChild(sop,"Offset"); if (offset != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(offset); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.offset=StringToDouble(token, (char **) NULL); break; } case 1: { color_correction.green.offset=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.offset=StringToDouble(token, (char **) NULL); break; } } } } power=GetXMLTreeChild(sop,"Power"); if (power != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(power); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.power=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.power=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.power=StringToDouble(token, (char **) NULL); break; } } } } } sat=GetXMLTreeChild(cc,"SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo *saturation; saturation=GetXMLTreeChild(sat,"Saturation"); if (saturation != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(saturation); p=(const char *) content; GetNextToken(p,&p,MagickPathExtent,token); color_correction.saturation=StringToDouble(token,(char **) NULL); } } ccc=DestroyXMLTree(ccc); if (image->debug != MagickFalse) { (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Color Correction Collection:"); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.slope: %g",color_correction.red.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " 
color_correction.red.offset: %g",color_correction.red.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.power: %g",color_correction.red.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.slope: %g",color_correction.green.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.offset: %g",color_correction.green.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.power: %g",color_correction.green.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.slope: %g",color_correction.blue.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.offset: %g",color_correction.blue.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.power: %g",color_correction.blue.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.saturation: %g",color_correction.saturation); } cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map)); if (cdl_map == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); for (i=0; i <= (ssize_t) MaxMap; i++) { cdl_map[i].red=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+ color_correction.red.offset,color_correction.red.power)))); cdl_map[i].green=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+ color_correction.green.offset,color_correction.green.power)))); cdl_map[i].blue=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+ color_correction.blue.offset,color_correction.blue.power)))); } if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Apply transfer function to colormap. 
*/ double luma; luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+ 0.07217f*image->colormap[i].blue; image->colormap[i].red=luma+color_correction.saturation*cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma; image->colormap[i].green=luma+color_correction.saturation*cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-luma; image->colormap[i].blue=luma+color_correction.saturation*cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma; } /* Apply transfer function to image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double luma; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+ 0.07217f*GetPixelBlue(image,q); SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q); SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q); SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ColorDecisionListImageChannel) #endif 
proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag, progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastImage() enhances the intensity differences between the lighter and % darker elements of the image. Set sharpen to a MagickTrue to increase the % image contrast otherwise the contrast is reduced. % % The format of the ContrastImage method is: % % MagickBooleanType ContrastImage(Image *image, % const MagickBooleanType sharpen,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o sharpen: Increase or decrease image contrast. % % o exception: return any errors or warnings in this structure. % */ static void Contrast(const int sign,double *red,double *green,double *blue) { double brightness, hue, saturation; /* Enhance contrast: dark color become darker, light color become lighter. 
*/ assert(red != (double *) NULL); assert(green != (double *) NULL); assert(blue != (double *) NULL); hue=0.0; saturation=0.0; brightness=0.0; ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)- brightness); if (brightness > 1.0) brightness=1.0; else if (brightness < 0.0) brightness=0.0; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } MagickExport MagickBooleanType ContrastImage(Image *image, const MagickBooleanType sharpen,ExceptionInfo *exception) { #define ContrastImageTag "Contrast/Image" CacheView *image_view; int sign; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse) return(MagickTrue); #endif if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); sign=sharpen != MagickFalse ? 1 : -1; if (image->storage_class == PseudoClass) { /* Contrast enhance colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { double blue, green, red; red=(double) image->colormap[i].red; green=(double) image->colormap[i].green; blue=(double) image->colormap[i].blue; Contrast(sign,&red,&green,&blue); image->colormap[i].red=(MagickRealType) red; image->colormap[i].green=(MagickRealType) green; image->colormap[i].blue=(MagickRealType) blue; } } /* Contrast enhance image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double blue, green, red; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); Contrast(sign,&red,&green,&blue); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ContrastImage) #endif proceed=SetImageProgress(image,ContrastImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastStretchImage() is a simple image enhancement technique that attempts % to improve the contrast in an image by 'stretching' the range of intensity % values it contains to span a desired range of values. It differs from the % more sophisticated histogram equalization in that it can only apply a % linear scaling function to the image pixel values. As a result the % 'enhancement' is less harsh. 
%
%  The format of the ContrastStretchImage method is:
%
%      MagickBooleanType ContrastStretchImage(Image *image,
%        const char *levels,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: the black point.
%
%    o white_point: the white point.
%
%    o levels: Specify the levels where the black and white points have the
%      range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define MaxRange(color)  ((double) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag  "ContrastStretch/Image"

  CacheView
    *image_view;

  double
    *black,
    *histogram,
    *stretch_map,
    *white;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageGray(image,exception) != MagickFalse)
    (void) SetImageColorspace(image,GRAYColorspace,exception);
  black=(double *) AcquireQuantumMemory(GetPixelChannels(image),sizeof(*black));
  white=(double *) AcquireQuantumMemory(GetPixelChannels(image),sizeof(*white));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
    sizeof(*histogram));
  stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,
    GetPixelChannels(image)*sizeof(*stretch_map));
  if ((black == (double *) NULL) || (white == (double *) NULL) ||
      (histogram == (double *) NULL) || (stretch_map == (double *) NULL))
    {
      /* Release whatever was acquired before reporting the failure. */
      if (stretch_map != (double *) NULL)
        stretch_map=(double *) RelinquishMagickMemory(stretch_map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (white != (double *) NULL)
        white=(double *) RelinquishMagickMemory(white);
      if (black != (double *) NULL)
        black=(double *) RelinquishMagickMemory(black);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      /* Default channel mask: bin by overall intensity; with an explicit
         channel mask, bin each channel by its own value. */
      pixel=GetPixelIntensity(image,p);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        if (image->channel_mask != DefaultChannels)
          pixel=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(pixel))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black/white levels.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    register ssize_t
      j;

    black[i]=0.0;
    white[i]=MaxRange(QuantumRange);
    /* Accumulate from the dark end until black_point pixels are passed. */
    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > black_point)
        break;
    }
    black[i]=(double) j;
    /* Accumulate from the bright end for the white level. */
    intensity=0.0;
    for (j=(ssize_t) MaxMap; j != 0; j--)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > ((double) image->columns*image->rows-white_point))
        break;
    }
    white[i]=(double) j;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the histogram to create the stretched image mapping: values at
    or below black map to 0, at or above white map to QuantumRange, linear
    ramp in between (PerceptibleReciprocal guards white==black).
  */
  (void) ResetMagickMemory(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*stretch_map));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    register ssize_t
      j;

    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        gamma;

      gamma=PerceptibleReciprocal(white[i]-black[i]);
      if (j < (ssize_t) black[i])
        stretch_map[GetPixelChannels(image)*j+i]=0.0;
      else
        if (j > (ssize_t) white[i])
          stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange;
        else
          stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum(
            (double) (MaxMap*gamma*(j-black[i])));
    }
  }
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        j;

      /*
        Stretch-contrast colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,RedPixelChannel);
            image->colormap[j].red=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,GreenPixelChannel);
            image->colormap[j].green=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,BluePixelChannel);
            image->colormap[j].blue=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,AlphaPixelChannel);
            image->colormap[j].alpha=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i];
          }
      }
    }
  /*
    Stretch-contrast image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ContrastStretchImage)
#endif
        proceed=SetImageProgress(image,ContrastStretchImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  stretch_map=(double *) RelinquishMagickMemory(stretch_map);
  white=(double *) RelinquishMagickMemory(white);
  black=(double *) RelinquishMagickMemory(black);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E n h a n c e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EnhanceImage() applies a digital filter that improves the quality of a
%  noisy image.
%
%  The format of the EnhanceImage method is:
%
%      Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
#define EnhanceImageTag  "Enhance/Image"
/*
  EnhancePixel(weight): fold the neighborhood pixel at r into the running
  weighted average iff it is "close enough" to the center pixel.  Closeness
  is a luminance-weighted squared distance over the red/green/blue/black/
  alpha channels; the 0.069 threshold rejects outliers so edges are kept
  while noise is averaged away.  Advances r by one pixel as a side effect.
  NOTE(review): multi-statement macro without do{}while(0) — safe only
  because every use site below is a bare statement; do not use in an
  unbraced if/else.
*/
#define EnhancePixel(weight) \
  mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \
  distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \
  distance_squared=(4.0+mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \
  distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \
  distance_squared+=(7.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \
  distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \
  distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \
  distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \
  distance_squared+=(5.0-mean)*distance*distance; \
  if (distance_squared < 0.069) \
    { \
      aggregate.red+=(weight)*GetPixelRed(image,r); \
      aggregate.green+=(weight)*GetPixelGreen(image,r); \
      aggregate.blue+=(weight)*GetPixelBlue(image,r); \
      aggregate.black+=(weight)*GetPixelBlack(image,r); \
      aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \
      total_weight+=(weight); \
    } \
  r+=GetPixelChannels(image);

  CacheView
    *enhance_view,
    *image_view;

  Image
    *enhance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  enhance_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse)
    {
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image: for every pixel, compute a similarity-gated weighted
    average over its 5x5 neighborhood (virtual pixels supply the 2-pixel
    border), writing the result to the cloned output image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,enhance_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    /* Read a 5-row window starting 2 rows/columns before (0,y). */
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Offset (in Quantum units) of the window's center pixel: row 2, col 2. */
    center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2);
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        distance,
        distance_squared,
        mean,
        total_weight;

      PixelInfo
        aggregate;

      register const Quantum
        *magick_restrict r;

      if (GetPixelWriteMask(image,p) == 0)
        {
          /* Masked-out pixel: copy nothing, emit the background color. */
          SetPixelBackgoundColor(enhance_image,q);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(enhance_image);
          continue;
        }
      GetPixelInfo(image,&aggregate);
      total_weight=0.0;
      GetPixelInfoPixel(image,p+center,&pixel);
      /*
        Walk the 5x5 neighborhood row by row with a fixed Gaussian-like
        weight kernel (center weight 80); EnhancePixel advances r.
      */
      r=p;
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      r=p+GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+2*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0);
        EnhancePixel(40.0); EnhancePixel(10.0);
      r=p+3*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+4*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      /*
        Weighted average with rounding (+total_weight/2).  The center pixel
        always matches itself, so total_weight >= 80 and never divides by 0.
      */
      pixel.red=((aggregate.red+total_weight/2.0)/total_weight);
      pixel.green=((aggregate.green+total_weight/2.0)/total_weight);
      pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight);
      pixel.black=((aggregate.black+total_weight/2.0)/total_weight);
      pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight);
      SetPixelViaPixelInfo(image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(enhance_image);
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_EnhanceImage)
#endif
        proceed=SetImageProgress(image,EnhanceImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    enhance_image=DestroyImage(enhance_image);
  return(enhance_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E q u a l i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EqualizeImage() applies a histogram equalization to the image.
% % The format of the EqualizeImage method is: % % MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType EqualizeImage(Image *image, ExceptionInfo *exception) { #define EqualizeImageTag "Equalize/Image" CacheView *image_view; double black[CompositePixelChannel+1], *equalize_map, *histogram, *map, white[CompositePixelChannel+1]; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* Allocate and initialize histogram arrays. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateEqualizeImage(image,exception) != MagickFalse) return(MagickTrue); #endif if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL, GetPixelChannels(image)*sizeof(*equalize_map)); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)* sizeof(*histogram)); map=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)* sizeof(*map)); if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) || (map == (double *) NULL)) { if (map != (double *) NULL) map=(double *) RelinquishMagickMemory(map); if (histogram != (double *) NULL) histogram=(double *) RelinquishMagickMemory(histogram); if (equalize_map != (double *) NULL) equalize_map=(double *) RelinquishMagickMemory(equalize_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. 
*/ status=MagickTrue; (void) ResetMagickMemory(histogram,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; intensity=p[i]; if ((image->channel_mask & SyncChannels) != 0) intensity=GetPixelIntensity(image,p); histogram[GetPixelChannels(image)*ScaleQuantumToMap(intensity)+i]++; } p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Integrate the histogram to get the equalization map. */ for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; register ssize_t j; intensity=0.0; for (j=0; j <= (ssize_t) MaxMap; j++) { intensity+=histogram[GetPixelChannels(image)*j+i]; map[GetPixelChannels(image)*j+i]=intensity; } } (void) ResetMagickMemory(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*equalize_map)); (void) ResetMagickMemory(black,0,sizeof(*black)); (void) ResetMagickMemory(white,0,sizeof(*white)); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { register ssize_t j; black[i]=map[i]; white[i]=map[GetPixelChannels(image)*MaxMap+i]; if (black[i] != white[i]) for (j=0; j <= (ssize_t) MaxMap; j++) equalize_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum((double) ((MaxMap*(map[ GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i]))); } histogram=(double *) RelinquishMagickMemory(histogram); map=(double *) RelinquishMagickMemory(map); if (image->storage_class == PseudoClass) { register ssize_t j; /* Equalize colormap. 
*/ for (j=0; j < (ssize_t) image->colors; j++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel=GetPixelChannelChannel(image,RedPixelChannel); if (black[channel] != white[channel]) image->colormap[j].red=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+ channel]; } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel=GetPixelChannelChannel(image, GreenPixelChannel); if (black[channel] != white[channel]) image->colormap[j].green=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+ channel]; } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel=GetPixelChannelChannel(image,BluePixelChannel); if (black[channel] != white[channel]) image->colormap[j].blue=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+ channel]; } if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel=GetPixelChannelChannel(image, AlphaPixelChannel); if (black[channel] != white[channel]) image->colormap[j].alpha=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+ channel]; } } } /* Equalize image. 
*/ progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel=GetPixelChannelChannel(image,j); PixelTrait traits=GetPixelChannelTraits(image,channel); if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j])) continue; q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(q[j])+j]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EqualizeImage) #endif proceed=SetImageProgress(image,EqualizeImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); equalize_map=(double *) RelinquishMagickMemory(equalize_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GammaImage() gamma-corrects a particular image channel. The same % image viewed on different devices will have perceptual differences in the % way the image's intensities are represented on the screen. 
Specify
%  individual gamma levels for the red, green, and blue channels, or adjust
%  all three with the gamma parameter.  Values typically range from 0.8 to 2.3.
%
%  You can also reduce the influence of a particular channel with a gamma
%  value of 0.
%
%  The format of the GammaImage method is:
%
%      MagickBooleanType GammaImage(Image *image,const double gamma,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o level: the image gamma as a string (e.g. 1.6,1.2,1.0).
%
%    o gamma: the image gamma.
%
*/

/*
  Negative-safe pow(): pass negative values through unchanged so HDRI
  pixels below zero are not fed to pow() (which would yield NaN for
  non-integer exponents).
*/
static inline double gamma_pow(const double value,const double gamma)
{
  return(value < 0.0 ? value : pow(value,gamma));
}

MagickExport MagickBooleanType GammaImage(Image *image,const double gamma,
  ExceptionInfo *exception)
{
#define GammaCorrectImageTag  "GammaCorrect/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* gamma == 1.0 is the identity transform: nothing to do. */
  if (gamma == 1.0)
    return(MagickTrue);
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  /* gamma == 0.0 leaves the map all-zero, zeroing the channel entirely. */
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/
        MaxMap,1.0/gamma)));
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Gamma-correct colormap.  Non-HDRI builds use the precomputed lookup
        table; HDRI builds compute pow() directly to preserve out-of-range
        values.
      */
#if !defined(MAGICKCORE_HDRI_SUPPORT)
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].red))];
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].green))];
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].blue))];
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].alpha))];
#else
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].red,1.0/gamma);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].green,1.0/gamma);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].blue,1.0/gamma);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].alpha,1.0/gamma);
#endif
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
#if !defined(MAGICKCORE_HDRI_SUPPORT)
        q[j]=gamma_map[ScaleQuantumToMap(q[j])];
#else
        q[j]=QuantumRange*gamma_pow(QuantumScale*q[j],1.0/gamma);
#endif
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GammaImage)
#endif
        proceed=SetImageProgress(image,GammaCorrectImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /* Track the cumulative gamma applied to this image. */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G r a y s c a l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GrayscaleImage() converts the image to grayscale.
%
%  The format of the GrayscaleImage method is:
%
%      MagickBooleanType GrayscaleImage(Image *image,
%        const PixelIntensityMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: the pixel intensity method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method,ExceptionInfo *exception)
{
#define GrayscaleImageTag  "Grayscale/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /* Colormapped images must be expanded to DirectClass first. */
      if (SyncImage(image,exception) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
        return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse)
    {
      image->intensity=method;
      image->type=GrayscaleType;
      return(SetImageColorspace(image,GRAYColorspace,exception));
    }
#endif
  /*
    Grayscale image: compute an intensity per pixel according to the
    requested method and write it to the gray channel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        red,
        intensity;

      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      red=(MagickRealType) GetPixelRed(image,q);
      green=(MagickRealType) GetPixelGreen(image,q);
      blue=(MagickRealType) GetPixelBlue(image,q);
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          /* HSB brightness: the maximum channel. */
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          /* HSL lightness: midpoint of min and max channel. */
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          /* Mean of squares (no square root). */
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/3.0);
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /* Luma operates on gamma-encoded values; encode linear RGB. */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /* Luminance operates on linear values; decode sRGB. */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:
        {
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          /* Root mean square, normalized so a gray pixel maps to itself. */
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(image,ClampToQuantum(intensity),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GrayscaleImage)
#endif
        proceed=SetImageProgress(image,GrayscaleImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->intensity=method;
  image->type=GrayscaleType;
  return(SetImageColorspace(image,GRAYColorspace,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     H a l d C l u t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  HaldClutImage() applies a Hald color lookup table to the image.  A Hald
%  color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
%  Create it with the HALD coder.  You can apply any color transformation to
%  the Hald image and then use this method to apply the transform to the
%  image.
%
%  The format of the HaldClutImage method is:
%
%      MagickBooleanType HaldClutImage(Image *image,Image *hald_image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image, which is replaced by indexed CLUT values
%
%    o hald_image: the color lookup table image for replacement color values.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType HaldClutImage(Image *image,
  const Image *hald_image,ExceptionInfo *exception)
{
#define HaldClutImageTag  "Clut/Image"

  /* A point inside the Hald color cube (fractional cube coordinates). */
  typedef struct _HaldInfo
  {
    double
      x,
      y,
      z;
  } HaldInfo;

  CacheView
    *hald_view,
    *image_view;

  double
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  size_t
    cube_size,
    length,
    level;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(hald_image != (Image *) NULL);
  assert(hald_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Hald clut image.  Recover the cube order from the Hald image geometry:
    a level-N Hald image encodes an N^2 x N^2 x N^2 color cube.
  */
  status=MagickTrue;
  progress=0;
  length=(size_t) MagickMin((MagickRealType) hald_image->columns,
    (MagickRealType) hald_image->rows);
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetPixelInfo(hald_image,&zero);
  hald_view=AcquireVirtualCacheView(hald_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        offset;

      HaldInfo
        point;

      PixelInfo
        pixel,
        pixel1,
        pixel2,
        pixel3,
        pixel4;

      /* Map the pixel's R/G/B into cube coordinates in [0, level-1]. */
      point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q);
      offset=point.x+level*floor(point.y)+cube_size*floor(point.z);
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      /*
        Trilinear interpolation: blend along y on the lower z-slice
        (pixel3), along y on the upper z-slice (pixel4), then blend the two
        slices along z.  Interpolation along x happens inside
        InterpolatePixelInfo via the fractional column coordinate.
      */
      pixel1=zero;
      (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      pixel2=zero;
      (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      pixel3=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        point.y,&pixel3);
      offset+=cube_size;
      (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      pixel4=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        point.y,&pixel4);
      pixel=zero;
      CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha,
        point.z,&pixel);
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,ClampToQuantum(pixel.red),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,ClampToQuantum(pixel.green),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,ClampToQuantum(pixel.black),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_HaldClutImage)
#endif
        proceed=SetImageProgress(image,HaldClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImage() adjusts the levels of a particular image channel by
%  scaling the colors falling between specified white and black points to
%  the full available quantum range.
%
%  The parameters provided represent the black, and white points.  The black
%  point specifies the darkest color in the image.  Colors darker than the
%  black point are set to zero.  White point specifies the lightest color in
%  the image.  Colors brighter than the white point are set to the maximum
%  quantum value.
%
%  If a '!' flag is given, map black and white colors to the given levels
%  rather than mapping those levels to black and white.  See
%  LevelizeImage() below.
%
%  Gamma specifies a gamma correction to apply to the image.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType LevelImage(Image *image,const double black_point,
%        const double white_point,const double gamma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Map one channel value through the level transform: shift by black_point,
  rescale so [black_point,white_point] spans the quantum range, then apply
  gamma.  Degenerate levels (white == black) return the pixel unchanged to
  avoid division by zero.
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const double pixel)
{
  double
    level_pixel,
    scale;

  if (fabs(white_point-black_point) < MagickEpsilon)
    return(pixel);
  scale=1.0/(white_point-black_point);
  level_pixel=QuantumRange*gamma_pow(scale*((double) pixel-black_point),
    1.0/gamma);
  return(level_pixel);
}

MagickExport MagickBooleanType LevelImage(Image *image,const double black_point,
  const double white_point,const double gamma,ExceptionInfo *exception)
{
#define LevelImageTag  "Level/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].red));
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].green));
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].blue));
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].alpha));
    }
  /*
    Level image: apply LevelPixel to every updatable channel of every pixel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (double) q[j]));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_LevelImage)
#endif
        proceed=SetImageProgress(image,LevelImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  (void) ClampImage(image,exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelizeImage() applies the reversed LevelImage() operation to just
%  the specific channels specified.  It compresses the full range of color
%  values, so that they lie between the given black and white points.  Gamma is
%  applied before the values are mapped.
%
%  LevelizeImage() can be called by using a +level command line
%  API option, or by using a '!' on a -level or LevelImage() geometry string.
%
%  It can be used to de-contrast a greyscale image to the exact levels
%  specified.  Or by using specific levels for each channel of an image you
%  can convert a gray-scale image to any linear color gradient, according to
%  those levels.
%
%  The format of the LevelizeImage method is:
%
%      MagickBooleanType LevelizeImage(Image *image,const double black_point,
%        const double white_point,const double gamma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o gamma: adjust gamma by this factor before mapping values.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma,
  ExceptionInfo *exception)
{
#define LevelizeImageTag  "Levelize/Image"
/* Inverse level transfer: gamma is applied to the normalized value first,
   then the result is compressed into [black_point,white_point]. */
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap: only channels whose traits request an update.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) LevelizeValue(
          image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) LevelizeValue(
          image->colormap[i].alpha);
    }
  /*
    Level image: row-parallel pass over the authentic pixel cache.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      /* Masked pixels are skipped unchanged. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=LevelizeValue(q[j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_LevelizeImage)
#endif
        proceed=SetImageProgress(image,LevelizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImageColors() maps the given color to "black" and "white" values,
%  linearly spreading out the colors, and level values on a channel by channel
%  bases, as per LevelImage().  The given colors allows you to specify
%  different level ranges for each of the color channels separately.
%
%  If the boolean 'invert' is set true the image values will be modified in the
%  reverse direction.  That is any existing "black" and "white" colors in the
%  image will become the color values given, with all other values compressed
%  appropriately.  This effectively maps a greyscale gradient into the given
%  color gradient.
%
%  The format of the LevelImageColors method is:
%
%      MagickBooleanType LevelImageColors(Image *image,
%        const PixelInfo *black_color,const PixelInfo *white_color,
%        const MagickBooleanType invert,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_color: The color to map black to/from
%
%    o white_color: The color to map white to/from
%
%    o invert: if true map the colors (levelize), rather than from (level)
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelImageColors(Image *image,
  const PixelInfo *black_color,const PixelInfo *white_color,
  const MagickBooleanType invert,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  MagickStatusType
    status;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Leveling against non-gray reference colors cannot stay in a grayscale
     colorspace; promote the image to sRGB first. */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
       (IsGrayColorspace(white_color->colorspace) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  status=MagickTrue;
  if (invert == MagickFalse)
    {
      /* Forward (level): stretch each selected channel so the given colors
         become black and white.  Each channel is processed in isolation by
         temporarily restricting the channel mask. */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,RedChannel);
          status&=LevelImage(image,black_color->red,white_color->red,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,GreenChannel);
          status&=LevelImage(image,black_color->green,white_color->green,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,BlueChannel);
          status&=LevelImage(image,black_color->blue,white_color->blue,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          channel_mask=SetImageChannelMask(image,BlackChannel);
          status&=LevelImage(image,black_color->black,white_color->black,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        {
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=LevelImage(image,black_color->alpha,white_color->alpha,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
    }
  else
    {
      /* Inverse (levelize): compress each selected channel into the range
         bounded by the given colors. */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,RedChannel);
          status&=LevelizeImage(image,black_color->red,white_color->red,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,GreenChannel);
          status&=LevelizeImage(image,black_color->green,white_color->green,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        {
          channel_mask=SetImageChannelMask(image,BlueChannel);
          status&=LevelizeImage(image,black_color->blue,white_color->blue,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          channel_mask=SetImageChannelMask(image,BlackChannel);
          status&=LevelizeImage(image,black_color->black,white_color->black,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        {
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=LevelizeImage(image,black_color->alpha,white_color->alpha,1.0,
            exception);
          (void) SetImageChannelMask(image,channel_mask);
        }
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L i n e a r S t r e t c h I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LinearStretchImage() discards any pixels below the black point and above
%  the white point and levels the remaining pixels.
%
%  The format of the LinearStretchImage method is:
%
%      MagickBooleanType LinearStretchImage(Image *image,
%        const double black_point,const double white_point,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: the black point.
%
%    o white_point: the white point.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType LinearStretchImage(Image *image, const double black_point,const double white_point,ExceptionInfo *exception) { #define LinearStretchImageTag "LinearStretch/Image" CacheView *image_view; double *histogram, intensity; MagickBooleanType status; ssize_t black, white, y; /* Allocate histogram and linear map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram)); if (histogram == (double *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Form histogram. */ (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { intensity=GetPixelIntensity(image,p); histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Find the histogram boundaries by locating the black and white point levels. 
*/ intensity=0.0; for (black=0; black < (ssize_t) MaxMap; black++) { intensity+=histogram[black]; if (intensity >= black_point) break; } intensity=0.0; for (white=(ssize_t) MaxMap; white != 0; white--) { intensity+=histogram[white]; if (intensity >= white_point) break; } histogram=(double *) RelinquishMagickMemory(histogram); status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black), (double) ScaleMapToQuantum((MagickRealType) white),1.0,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d u l a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModulateImage() lets you control the brightness, saturation, and hue % of an image. Modulate represents the brightness, saturation, and hue % as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the % modulation is lightness, saturation, and hue. For HWB, use blackness, % whiteness, and hue. And for HCL, use chrome, luma, and hue. % % The format of the ModulateImage method is: % % MagickBooleanType ModulateImage(Image *image,const char *modulate, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o modulate: Define the percent change in brightness, saturation, and hue. % % o exception: return any errors or warnings in this structure. % */ static inline void ModulateHCL(const double percent_hue, const double percent_chroma,const double percent_luma,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. 
*/ ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma); hue+=fmod((percent_hue-100.0),200.0)/200.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHCLp(const double percent_hue, const double percent_chroma,const double percent_luma,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma); hue+=fmod((percent_hue-100.0),200.0)/200.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLpToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHSB(const double percent_hue, const double percent_saturation,const double percent_brightness,double *red, double *green,double *blue) { double brightness, hue, saturation; /* Increase or decrease color brightness, saturation, or hue. */ ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; brightness*=0.01*percent_brightness; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } static inline void ModulateHSI(const double percent_hue, const double percent_saturation,const double percent_intensity,double *red, double *green,double *blue) { double intensity, hue, saturation; /* Increase or decrease color intensity, saturation, or hue. */ ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; intensity*=0.01*percent_intensity; ConvertHSIToRGB(hue,saturation,intensity,red,green,blue); } static inline void ModulateHSL(const double percent_hue, const double percent_saturation,const double percent_lightness,double *red, double *green,double *blue) { double hue, lightness, saturation; /* Increase or decrease color lightness, saturation, or hue. 
*/ ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; lightness*=0.01*percent_lightness; ConvertHSLToRGB(hue,saturation,lightness,red,green,blue); } static inline void ModulateHSV(const double percent_hue, const double percent_saturation,const double percent_value,double *red, double *green,double *blue) { double hue, saturation, value; /* Increase or decrease color value, saturation, or hue. */ ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; value*=0.01*percent_value; ConvertHSVToRGB(hue,saturation,value,red,green,blue); } static inline void ModulateHWB(const double percent_hue, const double percent_whiteness,const double percent_blackness,double *red, double *green,double *blue) { double blackness, hue, whiteness; /* Increase or decrease color blackness, whiteness, or hue. */ ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness); hue+=fmod((percent_hue-100.0),200.0)/200.0; blackness*=0.01*percent_blackness; whiteness*=0.01*percent_whiteness; ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue); } static inline void ModulateLCHab(const double percent_luma, const double percent_chroma,const double percent_hue,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHabToRGB(luma,chroma,hue,red,green,blue); } static inline void ModulateLCHuv(const double percent_luma, const double percent_chroma,const double percent_hue,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. 
*/ ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue); } MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate, ExceptionInfo *exception) { #define ModulateImageTag "Modulate/Image" CacheView *image_view; ColorspaceType colorspace; const char *artifact; double percent_brightness, percent_hue, percent_saturation; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; register ssize_t i; ssize_t y; /* Initialize modulate table. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (modulate == (char *) NULL) return(MagickFalse); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); flags=ParseGeometry(modulate,&geometry_info); percent_brightness=geometry_info.rho; percent_saturation=geometry_info.sigma; if ((flags & SigmaValue) == 0) percent_saturation=100.0; percent_hue=geometry_info.xi; if ((flags & XiValue) == 0) percent_hue=100.0; colorspace=UndefinedColorspace; artifact=GetImageArtifact(image,"modulate:colorspace"); if (artifact != (const char *) NULL) colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse,artifact); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { double blue, green, red; /* Modulate image colormap. 
*/ red=(double) image->colormap[i].red; green=(double) image->colormap[i].green; blue=(double) image->colormap[i].blue; switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSIColorspace: { ModulateHSI(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } image->colormap[i].red=red; image->colormap[i].green=green; image->colormap[i].blue=blue; } /* Modulate image. 
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateModulateImage(image,percent_brightness,percent_hue, percent_saturation,colorspace,exception) != MagickFalse) return(MagickTrue); #endif status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHColorspace: case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } SetPixelRed(image,ClampToQuantum(red),q); 
SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ModulateImage) #endif proceed=SetImageProgress(image,ModulateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e g a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NegateImage() negates the colors in the reference image. The grayscale % option means that only grayscale values within the image are negated. % % The format of the NegateImage method is: % % MagickBooleanType NegateImage(Image *image, % const MagickBooleanType grayscale,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o grayscale: If MagickTrue, only negate grayscale pixels within the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType NegateImage(Image *image, const MagickBooleanType grayscale,ExceptionInfo *exception) { #define NegateImageTag "Negate/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Negate colormap. 
*/ if( grayscale != MagickFalse ) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=QuantumRange-image->colormap[i].red; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=QuantumRange-image->colormap[i].green; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=QuantumRange-image->colormap[i].blue; } /* Negate image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); if( grayscale != MagickFalse ) { for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; if ((GetPixelWriteMask(image,q) == 0) || IsPixelGray(image,q) != MagickFalse) { q+=GetPixelChannels(image); continue; } for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel=GetPixelChannelChannel(image,j); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=QuantumRange-q[j]; } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_NegateImage) #endif proceed=SetImageProgress(image,NegateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(MagickTrue); } /* Negate image. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel=GetPixelChannelChannel(image,j); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=QuantumRange-q[j]; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_NegateImage) #endif proceed=SetImageProgress(image,NegateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N o r m a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The NormalizeImage() method enhances the contrast of a color image by % mapping the darkest 2 percent of all pixel to black and the brightest % 1 percent to white. % % The format of the NormalizeImage method is: % % MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType NormalizeImage(Image *image, ExceptionInfo *exception) { double black_point, white_point; black_point=(double) image->columns*image->rows*0.0015; white_point=(double) image->columns*image->rows*0.9995; return(ContrastStretchImage(image,black_point,white_point,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S i g m o i d a l C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SigmoidalContrastImage() adjusts the contrast of an image with a non-linear % sigmoidal contrast algorithm. Increase the contrast of the image using a % sigmoidal transfer function without saturating highlights or shadows. % Contrast indicates how much to increase the contrast (0 is none; 3 is % typical; 20 is pushing it); mid-point indicates where midtones fall in the % resultant image (0 is white; 50% is middle-gray; 100% is black). Set % sharpen to MagickTrue to increase the image contrast otherwise the contrast % is reduced. % % The format of the SigmoidalContrastImage method is: % % MagickBooleanType SigmoidalContrastImage(Image *image, % const MagickBooleanType sharpen,const char *levels, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o sharpen: Increase or decrease image contrast. % % o contrast: strength of the contrast, the larger the number the more % 'threshold-like' it becomes. % % o midpoint: midpoint of the function as a color value 0 to QuantumRange. % % o exception: return any errors or warnings in this structure. % */ /* ImageMagick 6 has a version of this function which uses LUTs. */ /* Sigmoidal function Sigmoidal with inflexion point moved to b and "slope constant" set to a. The first version, based on the hyperbolic tangent tanh, when combined with the scaling step, is an exact arithmetic clone of the the sigmoid function based on the logistic curve. 
The equivalence is based on the identity 1/(1+exp(-t)) = (1+tanh(t/2))/2 (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the scaled sigmoidal derivation is invariant under affine transformations of the ordinate. The tanh version is almost certainly more accurate and cheaper. The 0.5 factor in the argument is to clone the legacy ImageMagick behavior. The reason for making the define depend on atanh even though it only uses tanh has to do with the construction of the inverse of the scaled sigmoidal. */ #if defined(MAGICKCORE_HAVE_ATANH) #define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) ) #else #define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) ) #endif /* Scaled sigmoidal function: ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) / ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) ) See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by zero. This is fixed below by exiting immediately when contrast is small, leaving the image (or colormap) unmodified. This appears to be safe because the series expansion of the logistic sigmoidal function around x=b is 1/2-a*(b-x)/4+... so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh). */ #define ScaledSigmoidal(a,b,x) ( \ (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \ (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) ) /* Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even when creating a LUT from in gamut values, hence the branching. In addition, HDRI may have out of gamut values. InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal: It is only a right inverse. This is unavoidable. 
*/
static inline double InverseScaledSigmoidal(const double a,const double b,
  const double x)
{
  const double sig0=Sigmoidal(a,b,0.0);
  const double sig1=Sigmoidal(a,b,1.0);
  const double argument=(sig1-sig0)*x+sig0;
  /* Clamp the argument into the open domain of atanh (resp. of the
     logistic inverse) before inverting; the two #if branches share the
     surrounding parentheses of this initializer. */
  const double clamped=
    (
#if defined(MAGICKCORE_HAVE_ATANH)
      argument < -1+MagickEpsilon
      ?
      -1+MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  return(b+(2.0/a)*atanh(clamped));
#else
      argument < MagickEpsilon
      ?
      MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  return(b-log(1.0/clamped-1.0)/a);
#endif
}

MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const double contrast,const double midpoint,
  ExceptionInfo *exception)
{
#define SigmoidalContrastImageTag  "SigmoidalContrast/Image"
#define ScaledSig(x) ( ClampToQuantum(QuantumRange* \
  ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )
#define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \
  InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Convenience macros.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Side effect: may clamp values unless contrast<MagickEpsilon, in which
    case nothing is done.
  */
  if (contrast < MagickEpsilon)
    return(MagickTrue);
  /*
    Sigmoidal-contrast enhance colormap: forward (sharpen) or inverse
    transfer function, per updatable channel.
  */
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      if( sharpen != MagickFalse )
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) ScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) ScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) ScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) ScaledSig(
              image->colormap[i].alpha);
        }
      else
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) InverseScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) InverseScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) InverseScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) InverseScaledSig(
              image->colormap[i].alpha);
        }
    }
  /*
    Sigmoidal-contrast enhance image: row-parallel pass over the pixel cache.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* Masked pixels are skipped unchanged. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if( sharpen != MagickFalse )
          q[i]=ScaledSig(q[i]);
        else
          q[i]=InverseScaledSig(q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SigmoidalContrastImage)
#endif
        proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
pr35244.c
/* PR c++/35244 */
/* { dg-do compile } */
/* { dg-require-effective-target tls } */
/* { dg-options "-fopenmp" } */

/* Regression test: '#pragma omp threadprivate' must reject typedef names
   (only variables are valid arguments) and accept file-scope and
   block-scope static variables.  The dg-error annotations below bind to
   their own lines and check the diagnostics for the invalid forms.  */

int v1;
typedef struct A A;
typedef int i;
#pragma omp threadprivate (i)  /* { dg-error "expected identifier before" } */
#pragma omp threadprivate (A)  /* { dg-error "expected identifier before" } */
#pragma omp threadprivate (v1)

void foo ()
{
  static int v4;
  {
    static int v5;
#pragma omp threadprivate (v4, v5)
  }
}
cpu_solver.c
/** \file cpu_solver.c * \brief Codice host degli algoritmo del solver. * * */ #ifndef MAX_THREADS #define MAX_THREADS 16 #endif #ifndef CHUNK_SIZE #define CHUNK_SIZE 4 #endif #include "cpu_solver.h" #include <omp.h> extern char* str; extern int counter_nodi; extern int counter_nodi0; //extern int counter_nodi0_1; //extern int counter_nodi0_2; extern int num_nodi; extern int num_archi; extern int max_pesi; extern int MG_pesi; extern char *nodeFlags; extern int *host_csrPtrInSuccLists; extern int *host_csrSuccLists; extern int *host_csrPesiArchi; extern int *host_cscPtrInPredLists; extern int *host_cscPredLists; extern int *host_cscPesiArchiPred; extern int numLowInDegree[4]; extern int numAllInDegree[LEN_DEGREEHIST]; extern int numAllOutDegree[LEN_DEGREEHIST]; extern int *host_ResNodeValues1; extern int *host_ResNodeValues2; extern int *host_ResNodeValuesAux; extern int *host_csrDataArchiAux; extern uint timeout_expired; extern config configuration; extern stat statistics; void EG0_scpu_solver() { int idx; int idy; int val; long max_loop = configuration.max_loop_val; long loop; int *data1; int *data2; int *temp; int tempval; int flag1=0; printf("Initializing cpu EG0 solver.\n"); max_loop = aggiorna_max_loop((long)num_archi, (long)num_nodi, (long)MG_pesi, max_loop); data1 = host_ResNodeValues1; data2 = host_ResNodeValues2; for (idx=0; idx<counter_nodi; idx++) { data1[idx] =0; data2[idx] =0; } printf("Running EG0 on CPU. 
(MG_pesi=%d max_loop=%ld num nodes=%d num archi=%d max weight=%d\n", MG_pesi, max_loop, num_nodi, num_archi, max_pesi); fflush(stdout); for (loop=1; loop<=max_loop; loop++) { flag1=0; for (idx=0; idx<counter_nodi; idx++) { tempval = OMINUS(data1[host_csrSuccLists[host_csrPtrInSuccLists[idx]]] , host_csrPesiArchi[host_csrPtrInSuccLists[idx]]); for (idy=(host_csrPtrInSuccLists[idx])+1; idy < host_csrPtrInSuccLists[idx+1]; idy++) { val = OMINUS(data1[host_csrSuccLists[idy]] , host_csrPesiArchi[idy]); if ((idx<counter_nodi0) && (tempval > val)) { tempval = val; } if ((idx>=counter_nodi0) && (tempval < val)) { tempval = val; } } if (data2[idx] < tempval) { flag1=1; data2[idx] = tempval; } } temp = data1; data1 = data2; data2 = temp; //swap ruoli degli array if (flag1 == 0) {break;} if (timeout_expired == 1) {break;} } if ((max_loop%2) != 0) { //se numero loop e' dispari, il risultato e' nell'array host_ResNodeValues2[]. Lo metto in host_ResNodeValues1[] temp = host_ResNodeValues1; host_ResNodeValues1 = host_ResNodeValues2; host_ResNodeValues2 = temp; } printf("End EG0 on CPU after %ld loops (each loop involves all nodes) (flag1=%d)\n", loop-1, flag1); statistics.processedNodes = ((long)(loop-1))*((long)num_nodi); } void EG0_cpu_solver() { int idx; int idy; long max_loop = configuration.max_loop_val; long loop; int *data1; int *data2; int *temp; // Varaible for OpenMP thread management int flag_per_thread[MAX_THREADS]; static int tid; static int nthx=1; int c=0; for (c = 0; c < MAX_THREADS ; c++) flag_per_thread[c] = 0; #pragma omp threadprivate(tid) #pragma omp parallel { tid = omp_get_thread_num(); nthx = omp_get_num_threads(); nthx = (nthx < MAX_THREADS) ? 
nthx : MAX_THREADS; } printf("Initializing cpu EG0 solver using (%d Mthreads).\n", nthx); max_loop = aggiorna_max_loop((long)num_archi, (long)num_nodi, (long)MG_pesi, max_loop); data1 = host_ResNodeValues1; data2 = host_ResNodeValues2; for (idx=0; idx<counter_nodi; idx++) { data1[idx] =0; data2[idx] =0; } //printf("Running EG0 on CPU. (MG_pesi=%d max_loop=%ld num nodes=%d num archi=%d max weight=%d\n", MG_pesi, max_loop, num_nodi, num_archi, max_pesi); fflush(stdout); for (loop=1; loop<=max_loop; loop++) { for (c = 0; c < nthx; c++) flag_per_thread[c] = 0; #pragma omp parallel for schedule(guided, CHUNK_SIZE) for (idx=0; idx<counter_nodi; idx++) { int tempval = OMINUS(data1[host_csrSuccLists[host_csrPtrInSuccLists[idx]]] , host_csrPesiArchi[host_csrPtrInSuccLists[idx]]); for (idy=(host_csrPtrInSuccLists[idx])+1; idy < host_csrPtrInSuccLists[idx+1]; idy++) { int val = OMINUS(data1[host_csrSuccLists[idy]] , host_csrPesiArchi[idy]); if ((idx<counter_nodi0) && (tempval > val)) { tempval = val; } if ((idx>=counter_nodi0) && (tempval < val)) { tempval = val; } } if (data2[idx] < tempval) { flag_per_thread[tid]=1; data2[idx] = tempval; } } int check = 0; temp = data1; data1 = data2; data2 = temp; //swap ruoli degli array for (c = 0; c < nthx; c++){ if (flag_per_thread[c] == 0){ check += flag_per_thread[c]; } else { check += flag_per_thread[c]; break; } } if (check == 0) {break;} if (timeout_expired == 1) {break;} } if ((max_loop%2) != 0) { //se numero loop e' dispari, il risultato e' nell'array host_ResNodeValues2[]. 
Lo metto in host_ResNodeValues1[] temp = host_ResNodeValues1; host_ResNodeValues1 = host_ResNodeValues2; host_ResNodeValues2 = temp; } printf("End EG0 on CPU after %ld loops (each loop involves all nodes)\n", loop-1); statistics.processedNodes = ((long)(loop-1))*((long)num_nodi); } void EG_cpu_solver() { int idx; int idy; int val; long max_loop = configuration.max_loop_val; long loop; int *data1 = host_ResNodeValues1; // vettore dei risultati (f(v)) inizialmente gia' azzerato int temp; int flag1=0; int *stackL = host_csrDataArchiAux; //uso spazio host_csrDataArchiAux[] per memorizzare l'insieme L dei nodi con (potenziali) "inconsistenze" //inoltre uso il vettore di flags nodeFlags[] come bitmap per evitare di inserire doppi in stackL[] // N.B:: dato che ogni nodo ha in-degree e out-degree non nulli si ha num_archi>=num_nodi int top_stackL = 0; // altezza dello stackL //SCEGLIENDO questa si usa L come stack //int *queueL = stackL; // invece di stack uso queue //SCEGLIENDO questa si usa L come queue int in_queueL = 0; int out_queueL = 0; int len_queueL = 0; // Ora usa L come queue, le linee di codice per usare L come stack sono commentate. 
// Sperimentalmente, sembra che queue si comporti in media meglio di stack (per le istanze viste) printf("Initializing cpu EG solver.\n"); max_loop = aggiorna_max_loop((long)num_archi, (long)num_nodi, (long)MG_pesi, max_loop); // inizializza flags e stackL[] (= i nodi "inconsistenti") for (idx=0; idx<counter_nodi0; idx++) { temp=1; // finora sono tutti negativi idy=(host_csrPtrInSuccLists[idx]); while ((temp==1) && (idy < host_csrPtrInSuccLists[idx+1])) { if (host_csrPesiArchi[idy] >= 0) { temp = 0; } idy++; } if (temp==1) { // tutti outedges negativi nodeFlags[idx]=1; stackL[top_stackL++] = idx; } } for (idx=counter_nodi0; idx<counter_nodi; idx++) { temp=1; // finora sono nessun negativo idy=(host_csrPtrInSuccLists[idx]); while ((temp==1) && (idy < host_csrPtrInSuccLists[idx+1])) { if (host_csrPesiArchi[idy] < 0) { temp = 0; } idy++; } if (temp==0) { // almeno un outedge negativo nodeFlags[idx]=1; stackL[top_stackL++] = idx; } } in_queueL = top_stackL; out_queueL = 0; len_queueL = in_queueL-out_queueL; host_csr2csc(num_nodi, num_nodi, num_archi, host_csrPesiArchi, host_csrSuccLists, host_csrPtrInSuccLists, host_cscPesiArchiPred, host_cscPredLists, host_cscPtrInPredLists); /* Calcolo gli in-degree */ { int i,d; for (i=0; i<num_nodi; i++) { d = host_cscPtrInPredLists[i+1] - host_cscPtrInPredLists[i]; if (d<(LEN_DEGREEHIST-1)) { (numAllInDegree[d])++; } else { (numAllInDegree[LEN_DEGREEHIST-1])++; } if (d<4) { (numLowInDegree[d])++; } } printf("\tLow in-degree: 0: %d \t1: %d \t2: %d \t3: %d\n", numLowInDegree[0], numLowInDegree[1], numLowInDegree[2], numLowInDegree[3]);fflush(stdout); } if (configuration.print_degree_hist == YES_PRINT_DEGREEHIST) { int hist; printf("In degrees:,%s,", (configuration.stdinputsource == 1)?"stdin":configuration.filename); for (hist=0; hist<(LEN_DEGREEHIST-1); hist++) { printf("%d,", numAllInDegree[hist]); } printf("%d\n", numAllInDegree[LEN_DEGREEHIST-1]); printf("Out degrees:,%s,", (configuration.stdinputsource == 
1)?"stdin":configuration.filename); for (hist=0; hist<(LEN_DEGREEHIST-1); hist++) { printf("%d,", numAllOutDegree[hist]); } printf("%d\n", numAllOutDegree[LEN_DEGREEHIST-1]); } // Loop di calcolo dell'initial credit problem: printf("Running EG on CPU. (MG_pesi=%d max_loop=%ld num nodes=%d num archi=%d max weight=%d\n", MG_pesi, max_loop, num_nodi, num_archi, max_pesi); fflush(stdout); loop=1; //while ((top_stackL>0) && (loop<=max_loop)) { while ((len_queueL>0) && (loop<=max_loop)) { // DUE modi (a seconda che L sia usato come stack o queue) per non selezionare gli elementi di L in ordine, ma pseudo-casualmente: //int r = rand()%top_stackL; int tt = stackL[r]; stackL[r] = stackL[top_stackL]; stackL[top_stackL] = tt; //int r = rand()%len_queueL; int tt = stackL[(out_queueL+r)%num_nodi]; stackL[(out_queueL+r)%num_nodi] = stackL[out_queueL]; stackL[out_queueL] = tt; // Solo per sperimentare. Ora DISATTIVATI. loop++; flag1=0; //idx = stackL[--top_stackL]; //preleva uno dei nodi da processare usando L come stack idx = stackL[out_queueL]; out_queueL=(out_queueL+1)%num_nodi; //preleva uno dei nodi da processare usando L come queue len_queueL--; nodeFlags[idx]=0; //riazzero per il ciclo successivo temp = OMINUS(data1[host_csrSuccLists[host_csrPtrInSuccLists[idx]]] , host_csrPesiArchi[host_csrPtrInSuccLists[idx]]); for (idy=(host_csrPtrInSuccLists[idx])+1; idy < host_csrPtrInSuccLists[idx+1]; idy++) { val = OMINUS(data1[host_csrSuccLists[idy]] , host_csrPesiArchi[idy]); if ((idx<counter_nodi0) && (temp > val)) { temp = val; } if ((idx>=counter_nodi0) && (temp < val)) { temp = val; } } // aggiunge predecessori del nodo aggiornato if (data1[idx] < temp) { // il valore aumenta //printf(" %d : %d --> %d)\n",idx,data1[idx],temp); data1[idx] = temp; flag1=1; // aggiugi PREDs in stackL GREZZO: aggiunge sempre!!! non usa counter come nell'articolo di Raffaella&C. 
int idz; for (idz=host_cscPtrInPredLists[idx]; idz<host_cscPtrInPredLists[idx+1]; idz++) { if (nodeFlags[host_cscPredLists[idz]] == 0) { //stackL[top_stackL++] = host_cscPredLists[idz]; // L come stack stackL[in_queueL] = host_cscPredLists[idz]; in_queueL=(in_queueL+1)%num_nodi; // L come queue nodeFlags[host_cscPredLists[idz]]=1; len_queueL++; // L come queue } } } //if ((flag1 == 0) && (top_stackL==0)) {break;} // L come stack if ((flag1 == 0) && (len_queueL==0)) {break;} // L come queue if (timeout_expired == 1) {break;} } printf("End EG on CPU after %ld loops (each loop involves one node only) (flag1=%d)\n", loop-1, flag1); statistics.processedNodes = ((long)(loop-1)); } void cpu_solver() { struct timeb tp; double deltacpusoltime; ftime(&tp); deltacpusoltime = ((double)((long int)tp.time)); deltacpusoltime += ((double)tp.millitm)/1000; switch (configuration.algoritmo) { case ALGOR_EG0: // versione di EG naive (presa da master per facilitare multithread) EG0_cpu_solver(); break; case ALGOR_EG: EG_cpu_solver(); break; default: EG_cpu_solver(); break; } ftime(&tp); statistics.solvingtime = ((((double)((long int)tp.time)) + (((double)tp.millitm)/1000)) - deltacpusoltime); }
fftw++.h
/* Fast Fourier transform C++ header class for the FFTW3 Library
   Copyright (C) 2004-16
   John C. Bowman, University of Alberta
   Malcolm Roberts, University of Strasbourg

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */

#ifndef __fftwpp_h__
#define __fftwpp_h__ 1

#define __FFTWPP_H_VERSION__ 2.05

#include <cstdlib>
#include <fstream>
#include <iostream>
#include <fftw3.h>
#include <cerrno>
#include <map>

// Without OpenMP, force single-threaded operation.
#ifndef _OPENMP
#ifndef FFTWPP_SINGLE_THREAD
#define FFTWPP_SINGLE_THREAD
#endif
#endif

#ifndef FFTWPP_SINGLE_THREAD
#include <omp.h>
#endif

inline int get_thread_num()
{
#ifdef FFTWPP_SINGLE_THREAD
  return 0;
#else
  return omp_get_thread_num();
#endif
}

inline int get_max_threads()
{
#ifdef FFTWPP_SINGLE_THREAD
  return 1;
#else
  return omp_get_max_threads();
#endif
}

// Run `code' as an OpenMP parallel for when more than one thread is
// requested; otherwise run it serially.
#ifndef FFTWPP_SINGLE_THREAD
#define PARALLEL(code)                                  \
  if(threads > 1) {                                     \
    _Pragma("omp parallel for num_threads(threads)")    \
      code                                              \
      } else {                                          \
    code                                                \
      }
#else
#define PARALLEL(code)                          \
  {                                             \
    code                                        \
      }
#endif

#ifndef __Complex_h__
#include <complex>
typedef std::complex<double> Complex;
#endif

#include "seconds.h"
#include "statistics.h"
#include "align.h"

namespace fftwpp {

// Obsolete names:
#define FFTWComplex ComplexAlign
#define FFTWdouble doubleAlign
#define FFTWdelete deleteAlign

class fftw;

extern "C" fftw_plan Planner(fftw *F, Complex *in, Complex *out);
void LoadWisdom();
void SaveWisdom();

extern const char *inout;

// Result of timing a plan: the thread count used and the mean/stdev of
// the measured execution times.
struct threaddata {
  unsigned int threads;
  double mean;
  double stdev;
  threaddata() : threads(0), mean(0.0), stdev(0.0) {}
  threaddata(unsigned int threads, double mean, double stdev) :
    threads(threads), mean(mean), stdev(stdev) {}
};

class fftw;

// Holds the outer/inner thread counts shared by all transform classes.
class ThreadBase {
protected:
  unsigned int threads;
  unsigned int innerthreads;
public:
  ThreadBase();
  ThreadBase(unsigned int threads) : threads(threads) {}
  void Threads(unsigned int nthreads) {threads=nthreads;}
  unsigned int Threads() {return threads;}

  // Split the thread budget between nx outer iterations and the inner
  // transforms: give all threads to whichever dimension can use them.
  void multithread(unsigned int nx) {
    if(nx >= threads) {
      innerthreads=1;
    } else {
      innerthreads=threads;
      threads=1;
    }
  }
};

// Number of real values needed to hold a length-n real transform:
// in place it is padded to 2*(n/2+1) doubles, out of place it is n.
inline unsigned int realsize(unsigned int n, Complex *in, Complex *out=NULL)
{
  return (!out || in == out) ? 2*(n/2+1) : n;
}

inline unsigned int realsize(unsigned int n, Complex *in, double *out)
{
  return realsize(n,in,(Complex *) out);
}

inline unsigned int realsize(unsigned int n, double *in, Complex *out)
{
  return realsize(n,(Complex *) in,out);
}

// Base class for fft routines
//
class fftw : public ThreadBase {
protected:
  unsigned int doubles; // number of double precision values in dataset
  int sign;
  unsigned int threads;
  double norm;

  fftw_plan plan;
  bool inplace;

  unsigned int Dist(unsigned int n, size_t stride, size_t dist) {
    return dist ? dist : ((stride == 1) ? n : 1);
  }

  static const double twopi;

public:
  static unsigned int effort;
  static unsigned int maxthreads;
  static double testseconds;
  static const char *WisdomName;
  static fftw_plan (*planner)(fftw *f, Complex *in, Complex *out);

  virtual unsigned int Threads() {return threads;}

  static const char *oddshift;

  // Inplace shift of Fourier origin to (nx/2,0) for even nx.
  static void Shift(Complex *data, unsigned int nx, unsigned int ny,
                    unsigned int threads) {
    unsigned int nyp=ny/2+1;
    unsigned int stop=nx*nyp;
    if(nx % 2 == 0) {
      unsigned int inc=2*nyp;
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
      for(unsigned int i=nyp; i < stop; i += inc) {
        Complex *p=data+i;
        for(unsigned int j=0; j < nyp; j++) p[j]=-p[j];
      }
    } else {
      std::cerr << oddshift << std::endl;
      exit(1);
    }
  }

  // Out-of-place shift of Fourier origin to (nx/2,0) for even nx.
  static void Shift(double *data, unsigned int nx, unsigned int ny,
                    unsigned int threads) {
    if(nx % 2 == 0) {
      unsigned int stop=nx*ny;
      unsigned int inc=2*ny;
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
      for(unsigned int i=ny; i < stop; i += inc) {
        double *p=data+i;
        for(unsigned int j=0; j < ny; j++) p[j]=-p[j];
      }
    } else {
      std::cerr << oddshift << std::endl;
      exit(1);
    }
  }

  // Inplace shift of Fourier origin to (nx/2,ny/2,0) for even nx and ny.
  static void Shift(Complex *data, unsigned int nx, unsigned int ny,
                    unsigned int nz, unsigned int threads) {
    unsigned int nzp=nz/2+1;
    unsigned int nyzp=ny*nzp;
    if(nx % 2 == 0 && ny % 2 == 0) {
      unsigned int pinc=2*nzp;
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
      for(unsigned int i=0; i < nx; i++) {
        Complex *pstart=data+i*nyzp;
        Complex *pstop=pstart+nyzp;
        for(Complex *p=pstart+(1-(i % 2))*nzp; p < pstop; p += pinc) {
          for(unsigned int k=0; k < nzp; k++) p[k]=-p[k];
        }
      }
    } else {
      std::cerr << oddshift << " or odd ny" << std::endl;
      exit(1);
    }
  }

  // Out-of-place shift of Fourier origin to (nx/2,ny/2,0) for even nx and ny.
  static void Shift(double *data, unsigned int nx, unsigned int ny,
                    unsigned int nz, unsigned int threads) {
    unsigned int nyz=ny*nz;
    if(nx % 2 == 0 && ny % 2 == 0) {
      unsigned int pinc=2*nz;
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
      for(unsigned int i=0; i < nx; i++) {
        double *pstart=data+i*nyz;
        double *pstop=pstart+nyz;
        for(double *p=pstart+(1-(i % 2))*nz; p < pstop; p += pinc) {
          for(unsigned int k=0; k < nz; k++) p[k]=-p[k];
        }
      }
    } else {
      std::cerr << oddshift << " or odd ny" << std::endl;
      exit(1);
    }
  }

  fftw() : plan(NULL) {}

  fftw(unsigned int doubles, int sign, unsigned int threads,
       unsigned int n=0) :
    doubles(doubles), sign(sign), threads(threads),
    norm(1.0/(n ? n : doubles/2)), plan(NULL) {
#ifndef FFTWPP_SINGLE_THREAD
    fftw_init_threads();
#endif
  }

  virtual ~fftw() {
    if(plan) fftw_destroy_plan(plan);
  }

  virtual fftw_plan Plan(Complex *in, Complex *out) {return NULL;};

  inline void CheckAlign(Complex *p, const char *s) {
    if((size_t) p % sizeof(Complex) == 0) return;
    std::cerr << "WARNING: " << s << " array is not " << sizeof(Complex)
              << "-byte aligned: address " << p << std::endl;
  }

  void noplan() {
    std::cerr << "Unable to construct FFTW plan" << std::endl;
    exit(1);
  }

  static void planThreads(unsigned int threads) {
#ifndef FFTWPP_SINGLE_THREAD
    omp_set_num_threads(threads);
    fftw_plan_with_nthreads(threads);
#endif
  }

  // Benchmark the single-threaded plan plan1 against the multi-threaded
  // plan planT and keep whichever is faster, destroying the loser.
  threaddata time(fftw_plan plan1, fftw_plan planT, Complex *in, Complex *out,
                  unsigned int Threads) {
    utils::statistics S,ST;
    double stop=utils::totalseconds()+testseconds;
    threads=1;
    plan=plan1;
    fft(in,out);
    threads=Threads;
    plan=planT;
    fft(in,out);
    unsigned int N=1;
    for(;;) {
      double t0=utils::totalseconds();
      threads=1;
      plan=plan1;
      for(unsigned int i=0; i < N; ++i) fft(in,out);
      double t1=utils::totalseconds();
      threads=Threads;
      plan=planT;
      for(unsigned int i=0; i < N; ++i) fft(in,out);
      double t=utils::totalseconds();
      S.add(t1-t0);
      ST.add(t-t1);
      if(S.mean() < 100.0/CLOCKS_PER_SEC) N *= 2;
      if(S.count() >= 10) {
        double error=S.stdev();
        double diff=ST.mean()-S.mean();
        if(diff >= 0.0 || t > stop) {
          threads=1;
          plan=plan1;
          fftw_destroy_plan(planT);
          break;
        }
        if(diff < -error) {
          threads=Threads;
          fftw_destroy_plan(plan1);
          break;
        }
      }
    }
    return threaddata(threads,S.mean(),S.stdev());
  }

  // Overridden by derived classes to cache timing results per problem size.
  virtual threaddata lookup(bool inplace, unsigned int threads) {
    return threaddata();
  }
  virtual void store(bool inplace, const threaddata& data) {}

  inline Complex *CheckAlign(Complex *in, Complex *out, bool constructor=true) {
#ifndef NO_CHECK_ALIGN
    CheckAlign(in,constructor ? "constructor input" : "input");
    if(out) CheckAlign(out,constructor ? "constructor output" : "output");
    else out=in;
#else
    if(!out) out=in;
#endif
    return out;
  }

  // Construct the plan(s), timing single- vs multi-threaded variants the
  // first time a given configuration is seen.
  threaddata Setup(Complex *in, Complex *out=NULL) {
    bool alloc=!in;
    if(alloc) in=utils::ComplexAlign((doubles+1)/2);
    out=CheckAlign(in,out);
    inplace=(out==in);

    threaddata data;
    unsigned int Threads=threads;
    if(threads > 1) data=lookup(inplace,threads);
    threads=data.threads > 0 ? data.threads : 1;
    planThreads(threads);
    plan=(*planner)(this,in,out);
    if(!plan) noplan();

    fftw_plan planT;
    if(fftw::maxthreads > 1) {
      threads=Threads;
      planThreads(threads);
      planT=(*planner)(this,in,out);

      if(data.threads == 0) {
        if(planT) data=time(plan,planT,in,out,threads);
        else noplan();
        store(inplace,threaddata(threads,data.mean,data.stdev));
      }
    }

    if(alloc) Array::deleteAlign(in,(doubles+1)/2);
    return data;
  }

  threaddata Setup(Complex *in, double *out) {
    return Setup(in,(Complex *) out);
  }

  threaddata Setup(double *in, Complex *out=NULL) {
    return Setup((Complex *) in,out);
  }

  virtual void Execute(Complex *in, Complex *out, bool=false) {
    fftw_execute_dft(plan,(fftw_complex *) in,(fftw_complex *) out);
  }

  // Resolve the output array and enforce the in-place/out-of-place choice
  // made at planning time.
  Complex *Setout(Complex *in, Complex *out) {
    out=CheckAlign(in,out,false);
    if(inplace ^ (out == in)) {
      std::cerr << "ERROR: fft " << inout << std::endl;
      exit(1);
    }
    return out;
  }

  void fft(Complex *in, Complex *out=NULL) {
    out=Setout(in,out);
    Execute(in,out);
  }

  void fft(double *in, Complex *out=NULL) {
    fft((Complex *) in,out);
  }

  void fft(Complex *in, double *out) {
    fft(in,(Complex *) out);
  }

  // fft0: transform with the Fourier origin shifted (shift flag set).
  void fft0(Complex *in, Complex *out=NULL) {
    out=Setout(in,out);
    Execute(in,out,true);
  }

  void fft0(double *in, Complex *out=NULL) {
    fft0((Complex *) in,out);
  }

  void fft0(Complex *in, double *out) {
    fft0(in,(Complex *) out);
  }

  void Normalize(Complex *out) {
    unsigned int stop=doubles/2;
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
    for(unsigned int i=0; i < stop; i++) out[i] *= norm;
  }

  void Normalize(double *out) {
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
    for(unsigned int i=0; i < doubles; i++) out[i] *= norm;
  }

  virtual void fftNormalized(Complex *in, Complex *out=NULL, bool shift=false) {
    out=Setout(in,out);
    Execute(in,out,shift);
    Normalize(out);
  }

  void fftNormalized(Complex *in, double *out, bool shift=false) {
    out=(double *) Setout(in,(Complex *) out);
    Execute(in,(Complex *) out,shift);
    Normalize(out);
  }

  void fftNormalized(double *in, Complex *out, bool shift=false) {
    fftNormalized((Complex *) in,out,shift);
  }

  template<class I, class O>
  void fft0Normalized(I in, O out) {
    fftNormalized(in,out,true);
  }

  // Normalize M strided/interleaved transforms of length nx.
  template<class O>
  void Normalize(unsigned int nx, unsigned int M, size_t ostride,
                 size_t odist, O *out) {
    unsigned int stop=nx*ostride;
    O *outMdist=out+M*odist;
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
    for(unsigned int i=0; i < stop; i += ostride) {
      O *pstop=outMdist+i;
      for(O *p=out+i; p < pstop; p += odist) {
        *p *= norm;
      }
    }
  }

  template<class I, class O>
  void fftNormalized(unsigned int nx, unsigned int M, size_t ostride,
                     size_t odist, I *in, O *out=NULL, bool shift=false) {
    out=(O *) Setout((Complex *) in,(Complex *) out);
    Execute((Complex *) in,(Complex *) out,shift);
    Normalize(nx,M,ostride,odist,out);
  }
}; // class fftw

// Threaded matrix transpose of blocks of `length' doubles, implemented
// with rank-0 FFTW guru plans.
class Transpose {
  fftw_plan plan;
  fftw_plan plan2;
  unsigned int a,b;
  unsigned int nlength,mlength;
  unsigned int ilast,jlast;
  unsigned int rows,cols;
  unsigned int threads;
  bool inplace;
  unsigned int size;
public:
  template<class T>
  Transpose(unsigned int rows, unsigned int cols, unsigned int length,
            T *in, T *out=NULL, unsigned int threads=fftw::maxthreads) :
    rows(rows), cols(cols), threads(threads) {
    size=sizeof(T);
    if(size % sizeof(double) != 0) {
      std::cerr << "ERROR: Transpose is not implemented for type of size "
                << size;
      exit(1);
    }
    plan=plan2=NULL;
    if(rows == 0 || cols == 0) return;
    size /= sizeof(double);
    length *= size;

    if(!out) out=in;
    inplace=(out==in);
    if(inplace) {
      fftw::planThreads(threads);
      threads=1;
    } else
      fftw::planThreads(1);

    fftw_iodim dims[3];

    a=std::min(rows,threads);
    b=std::min(cols,threads/a);

    unsigned int n=utils::ceilquotient(rows,a);
    unsigned int m=utils::ceilquotient(cols,b);

    // If rows <= threads then a=rows and n=1.
    // If rows >= threads then b=1 and m=cols.

    nlength=n*length;
    mlength=m*length;

    dims[0].n=n;
    dims[0].is=cols*length;
    dims[0].os=length;

    dims[1].n=m;
    dims[1].is=length;
    dims[1].os=rows*length;

    dims[2].n=length;
    dims[2].is=1;
    dims[2].os=1;

    // A plan with rank=0 is a transpose.
    plan=fftw_plan_guru_r2r(0,NULL,3,dims,(double *) in,(double *) out,
                            NULL,fftw::effort);

    ilast=a;
    jlast=b;

    // plan2 handles the final, possibly smaller, remainder block.
    if(n*a > rows) { // Only happens when rows > threads.
      a=utils::ceilquotient(rows,n);
      ilast=a-1;
      dims[0].n=rows-n*ilast;
      plan2=fftw_plan_guru_r2r(0,NULL,3,dims,(double *) in,(double *) out,
                               NULL,fftw::effort);
    } else { // Only happens when rows < threads.
      if(m*b > cols) {
        b=utils::ceilquotient(cols,m);
        jlast=b-1;
        dims[1].n=cols-m*jlast;
        plan2=fftw_plan_guru_r2r(0,NULL,3,dims,(double *) in,(double *) out,
                                 NULL,fftw::effort);
      }
    }
  }

  ~Transpose() {
    if(plan) fftw_destroy_plan(plan);
    if(plan2) fftw_destroy_plan(plan2);
  }

  template<class T>
  void transpose(T *in, T *out=NULL) {
    if(rows == 0 || cols == 0) return;
    if(!out) out=in;
    if(inplace ^ (out == in)) {
      std::cerr << "ERROR: Transpose " << inout << std::endl;
      exit(1);
    }
#ifndef FFTWPP_SINGLE_THREAD
    if(a > 1) {
      if(b > 1) {
        int A=a, B=b;
#pragma omp parallel for num_threads(A)
        for(unsigned int i=0; i < a; ++i) {
          unsigned int I=i*nlength;
#pragma omp parallel for num_threads(B)
          for(unsigned int j=0; j < b; ++j) {
            unsigned int J=j*mlength;
            fftw_execute_r2r((i < ilast && j < jlast) ? plan : plan2,
                             (double *) in+cols*I+J,
                             (double *) out+rows*J+I);
          }
        }
      } else {
        int A=a;
#pragma omp parallel for num_threads(A)
        for(unsigned int i=0; i < a; ++i) {
          unsigned int I=i*nlength;
          fftw_execute_r2r(i < ilast ? plan : plan2,
                           (double *) in+cols*I,(double *) out+I);
        }
      }
    } else if(b > 1) {
      int B=b;
#pragma omp parallel for num_threads(B)
      for(unsigned int j=0; j < b; ++j) {
        unsigned int J=j*mlength;
        fftw_execute_r2r(j < jlast ? plan : plan2,
                         (double *) in+J,(double *) out+rows*J);
      }
    } else
#endif
      fftw_execute_r2r(plan,(double *) in,(double*) out);
  }
};

// Cache mapping a problem key (size/threads/placement) to timing data.
template<class T, class L>
class Threadtable {
public:
  typedef std::map<T,threaddata,L> Table;

  threaddata Lookup(Table& table, T key) {
    typename Table::iterator p=table.find(key);
    return p == table.end() ? threaddata() : p->second;
  }

  void Store(Table& threadtable, T key, const threaddata& data) {
    threadtable[key]=data;
  }
};

struct keytype1 {
  unsigned int nx;
  unsigned int threads;
  bool inplace;
  keytype1(unsigned int nx, unsigned int threads, bool inplace) :
    nx(nx), threads(threads), inplace(inplace) {}
};

struct keyless1 {
  bool operator()(const keytype1& a, const keytype1& b) const {
    return a.nx < b.nx || (a.nx == b.nx &&
                           (a.threads < b.threads ||
                            (a.threads == b.threads &&
                             a.inplace < b.inplace)));
  }
};

struct keytype2 {
  unsigned int nx;
  unsigned int ny;
  unsigned int threads;
  bool inplace;
  keytype2(unsigned int nx, unsigned int ny, unsigned int threads,
           bool inplace) :
    nx(nx), ny(ny), threads(threads), inplace(inplace) {}
};

struct keyless2 {
  bool operator()(const keytype2& a, const keytype2& b) const {
    return a.nx < b.nx || (a.nx == b.nx &&
                           (a.ny < b.ny || (a.ny == b.ny &&
                                            (a.threads < b.threads ||
                                             (a.threads == b.threads &&
                                              a.inplace < b.inplace)))));
  }
};

struct keytype3 {
  unsigned int nx;
  unsigned int ny;
  unsigned int nz;
  unsigned int threads;
  bool inplace;
  keytype3(unsigned int nx, unsigned int ny, unsigned int nz,
           unsigned int threads, bool inplace) :
    nx(nx), ny(ny), nz(nz), threads(threads), inplace(inplace) {}
};

struct keyless3 {
  bool operator()(const keytype3& a, const keytype3& b) const {
    return a.nx < b.nx || (a.nx == b.nx &&
                           (a.ny < b.ny || (a.ny == b.ny &&
                                            (a.nz < b.nz ||
                                             (a.nz == b.nz &&
                                              (a.threads < b.threads ||
                                               (a.threads == b.threads &&
                                                a.inplace < b.inplace)))))));
  }
};

// Compute the complex Fourier transform of n complex values.
// Before calling fft(), the arrays in and out (which may coincide) must be
// allocated as Complex[n].
//
// Out-of-place usage:
//
//   fft1d Forward(n,-1,in,out);
//   Forward.fft(in,out);
//
//   fft1d Backward(n,1,in,out);
//   Backward.fft(in,out);
//
//   fft1d Backward(n,1,in,out);
//   Backward.fftNormalized(in,out); // True inverse of Forward.fft(out,in);
//
// In-place usage:
//
//   fft1d Forward(n,-1);
//   Forward.fft(in);
//
//   fft1d Backward(n,1);
//   Backward.fft(in);
//
class fft1d : public fftw, public Threadtable<keytype1,keyless1> {
  unsigned int nx;           // transform length
  static Table threadtable;  // memoized thread counts, keyed on (nx,threads,inplace)
public:
  fft1d(unsigned int nx, int sign, Complex *in=NULL, Complex *out=NULL,
        unsigned int threads=maxthreads)
    : fftw(2*nx,sign,threads), nx(nx) {Setup(in,out);}

#ifdef __Array_h__
  // Convenience constructor deducing nx from an Array::array1.
  fft1d(int sign, const Array::array1<Complex>& in,
        const Array::array1<Complex>& out=Array::NULL1,
        unsigned int threads=maxthreads)
    : fftw(2*in.Nx(),sign,threads), nx(in.Nx()) {Setup(in,out);}
#endif

  threaddata lookup(bool inplace, unsigned int threads) {
    return this->Lookup(threadtable,keytype1(nx,threads,inplace));
  }
  void store(bool inplace, const threaddata& data) {
    this->Store(threadtable,keytype1(nx,data.threads,inplace),data);
  }
  fftw_plan Plan(Complex *in, Complex *out) {
    return fftw_plan_dft_1d(nx,(fftw_complex *) in,(fftw_complex *) out,
                            sign,effort);
  }
};

// Common machinery for M 1D transforms executed as batches: plan covers Q
// transforms per thread; when R=M-Q*T > 0, plan2 covers Q+1 transforms for
// the R threads that take an extra transform.  I and O are the FFTW-level
// element types (fftw_complex or double).
template<class I, class O>
class fftwblock : public virtual fftw {
public:
  int nx;                 // length of each 1D transform
  unsigned int M;         // number of transforms
  size_t istride,ostride; // element strides within a vector
  size_t idist,odist;     // distances between successive vectors
  fftw_plan plan1,plan2;  // remainder plans (see constructor)
  unsigned int T,Q,R;     // threads, transforms per thread, remainder

  fftwblock(unsigned int nx, unsigned int M, size_t istride, size_t ostride,
            size_t idist, size_t odist, Complex *in, Complex *out,
            unsigned int Threads) :
    fftw(), nx(nx), M(M), istride(istride), ostride(ostride),
    idist(Dist(nx,istride,idist)), odist(Dist(nx,ostride,odist)),
    plan1(NULL), plan2(NULL) {
    T=1;
    Q=M;
    R=0;
    // Time a single "thread block" configuration first, then a
    // multi-threaded one, and keep whichever measured faster.
    threaddata S1=Setup(in,out);
    fftw_plan planT1=plan;
    if(fftw::maxthreads > 1) {
      if(Threads > 1) {
        T=std::min(M,Threads);
        Q=T > 0 ? M/T : 0;
        R=M-Q*T;
        threads=Threads;
        threaddata ST=Setup(in,out);
        if(R > 0 && threads == 1 && plan1 != plan2) {
          fftw_destroy_plan(plan2);
          plan2=plan1;
        }
        if(ST.mean > S1.mean-S1.stdev) { // Use FFTW's multi-threading
          fftw_destroy_plan(plan);
          if(R > 0) {
            fftw_destroy_plan(plan2);
            plan2=NULL;
          }
          T=1;
          Q=M;
          R=0;
          plan=planT1;
          threads=S1.threads;
        } else {                         // Do the multi-threading ourselves
          fftw_destroy_plan(planT1);
          threads=ST.threads;
        }
      } else
        Setup(in,out); // Synchronize wisdom
    }
  }

  // Overloads mapping the stored geometry onto FFTW's advanced interface.
  fftw_plan Plan(int Q, fftw_complex *in, fftw_complex *out) {
    return fftw_plan_many_dft(1,&nx,Q,in,NULL,istride,idist,
                              out,NULL,ostride,odist,sign,effort);
  }
  fftw_plan Plan(int Q, double *in, fftw_complex *out) {
    return fftw_plan_many_dft_r2c(1,&nx,Q,in,NULL,istride,idist,
                                  out,NULL,ostride,odist,effort);
  }
  fftw_plan Plan(int Q, fftw_complex *in, double *out) {
    return fftw_plan_many_dft_c2r(1,&nx,Q,in,NULL,istride,idist,
                                  out,NULL,ostride,odist,effort);
  }

  // Build the main plan (Q transforms) and, when needed, the remainder
  // plan2 (Q+1 transforms).
  fftw_plan Plan(Complex *in, Complex *out) {
    if(R > 0) {
      plan2=Plan(Q+1,(I *) in,(O *) out);
      if(!plan2) return NULL;
      if(threads == 1) plan1=plan2;
    }
    return Plan(Q,(I *) in,(O *) out);
  }

  void Execute(fftw_plan plan, fftw_complex *in, fftw_complex *out) {
    fftw_execute_dft(plan,in,out);
  }
  void Execute(fftw_plan plan, double *in, fftw_complex *out) {
    fftw_execute_dft_r2c(plan,in,out);
  }
  void Execute(fftw_plan plan, fftw_complex *in, double *out) {
    fftw_execute_dft_c2r(plan,in,out);
  }

  // Dispatch the batches over T threads; the last R threads execute the
  // larger (Q+1-transform) plan2.
  void Execute(Complex *in, Complex *out, bool=false) {
    if(T == 1)
      Execute(plan,(I *) in,(O *) out);
    else {
      unsigned int extra=T-R;
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(T)
#endif
      for(unsigned int i=0; i < T; ++i) {
        unsigned int iQ=i*Q;
        if(i < extra)
          Execute(plan,(I *) in+iQ*idist,(O *) out+iQ*odist);
        else {
          unsigned int offset=iQ+i-extra;
          Execute(plan2,(I *) in+offset*idist,(O *) out+offset*odist);
        }
      }
    }
  }

  unsigned int Threads() {return std::max(T,threads);}

  ~fftwblock() {
    if(plan2) fftw_destroy_plan(plan2);
  }
};

// Compute the complex Fourier
transform of M complex vectors, each of // length n. // Before calling fft(), the arrays in and out (which may coincide) must be // allocated as Complex[M*n]. // // Out-of-place usage: // // mfft1d Forward(n,-1,M,stride,dist,in,out); // Forward.fft(in,out); // // In-place usage: // // mfft1d Forward(n,-1,M,stride,dist); // Forward.fft(in); // // Notes: // stride is the spacing between the elements of each Complex vector; // dist is the spacing between the first elements of the vectors. // // class mfft1d : public fftwblock<fftw_complex,fftw_complex>, public Threadtable<keytype3,keyless3> { static Table threadtable; public: mfft1d(unsigned int nx, int sign, unsigned int M=1, size_t stride=1, size_t dist=0, Complex *in=NULL, Complex *out=NULL, unsigned int threads=maxthreads) : fftw(2*((nx-1)*stride+(M-1)*Dist(nx,stride,dist)+1),sign,threads,nx), fftwblock<fftw_complex,fftw_complex> (nx,M,stride,stride,dist,dist,in,out,threads) {} mfft1d(unsigned int nx, int sign, unsigned int M, size_t istride, size_t ostride, size_t idist, size_t odist, Complex *in=NULL, Complex *out=NULL, unsigned int threads=maxthreads): fftw(std::max(2*((nx-1)*istride+(M-1)*Dist(nx,istride,idist)+1), 2*((nx-1)*ostride+(M-1)*Dist(nx,ostride,odist)+1)),sign, threads, nx), fftwblock<fftw_complex,fftw_complex>(nx,M,istride,ostride,idist,odist,in, out,threads) {} threaddata lookup(bool inplace, unsigned int threads) { return Lookup(threadtable,keytype3(nx,Q,R,threads,inplace)); } void store(bool inplace, const threaddata& data) { Store(threadtable,keytype3(nx,Q,R,data.threads,inplace),data); } }; // Compute the complex Fourier transform of n real values, using phase sign -1. // Before calling fft(), the array in must be allocated as double[n] and // the array out must be allocated as Complex[n/2+1]. The arrays in and out // may coincide, allocated as Complex[n/2+1]. 
// // Out-of-place usage: // // rcfft1d Forward(n,in,out); // Forward.fft(in,out); // // In-place usage: // // rcfft1d Forward(n); // Forward.fft(out); // // Notes: // in contains the n real values stored as a Complex array; // out contains the first n/2+1 Complex Fourier values. // class rcfft1d : public fftw, public Threadtable<keytype1,keyless1> { unsigned int nx; static Table threadtable; public: rcfft1d(unsigned int nx, Complex *out=NULL, unsigned int threads=maxthreads) : fftw(2*(nx/2+1),-1,threads,nx), nx(nx) {Setup(out,(double*) NULL);} rcfft1d(unsigned int nx, double *in, Complex *out=NULL, unsigned int threads=maxthreads) : fftw(2*(nx/2+1),-1,threads,nx), nx(nx) {Setup(in,out);} threaddata lookup(bool inplace, unsigned int threads) { return Lookup(threadtable,keytype1(nx,threads,inplace)); } void store(bool inplace, const threaddata& data) { Store(threadtable,keytype1(nx,data.threads,inplace),data); } fftw_plan Plan(Complex *in, Complex *out) { return fftw_plan_dft_r2c_1d(nx,(double *) in,(fftw_complex *) out, effort); } void Execute(Complex *in, Complex *out, bool=false) { fftw_execute_dft_r2c(plan,(double *) in,(fftw_complex *) out); } }; // Compute the real inverse Fourier transform of the n/2+1 Complex values // corresponding to the non-negative part of the frequency spectrum, using // phase sign +1. // Before calling fft(), the array in must be allocated as Complex[n/2+1] // and the array out must be allocated as double[n]. The arrays in and out // may coincide, allocated as Complex[n/2+1]. // // Out-of-place usage (input destroyed): // // crfft1d Backward(n,in,out); // Backward.fft(in,out); // // In-place usage: // // crfft1d Backward(n); // Backward.fft(in); // // Notes: // in contains the first n/2+1 Complex Fourier values. 
// out contains the n real values stored as a Complex array; // class crfft1d : public fftw, public Threadtable<keytype1,keyless1> { unsigned int nx; static Table threadtable; public: crfft1d(unsigned int nx, double *out=NULL, unsigned int threads=maxthreads) : fftw(2*(nx/2+1),1,threads,nx), nx(nx) {Setup(out);} crfft1d(unsigned int nx, Complex *in, double *out=NULL, unsigned int threads=maxthreads) : fftw(realsize(nx,in,out),1,threads,nx), nx(nx) {Setup(in,out);} threaddata lookup(bool inplace, unsigned int threads) { return Lookup(threadtable,keytype1(nx,threads,inplace)); } void store(bool inplace, const threaddata& data) { Store(threadtable,keytype1(nx,data.threads,inplace),data); } fftw_plan Plan(Complex *in, Complex *out) { return fftw_plan_dft_c2r_1d(nx,(fftw_complex *) in,(double *) out,effort); } void Execute(Complex *in, Complex *out, bool=false) { fftw_execute_dft_c2r(plan,(fftw_complex *) in,(double *) out); } }; // Compute the real Fourier transform of M real vectors, each of length n, // using phase sign -1. Before calling fft(), the array in must be // allocated as double[M*n] and the array out must be allocated as // Complex[M*(n/2+1)]. The arrays in and out may coincide, // allocated as Complex[M*(n/2+1)]. // // Out-of-place usage: // // mrcfft1d Forward(n,M,istride,ostride,idist,odist,in,out); // Forward.fft(in,out); // // In-place usage: // // mrcfft1d Forward(n,M,istride,ostride,idist,odist); // Forward.fft(out); // // Notes: // istride is the spacing between the elements of each real vector; // ostride is the spacing between the elements of each Complex vector; // idist is the spacing between the first elements of the real vectors; // odist is the spacing between the first elements of the Complex vectors; // in contains the n real values stored as a Complex array; // out contains the first n/2+1 Complex Fourier values. 
// class mrcfft1d : public fftwblock<double,fftw_complex>, public Threadtable<keytype3,keyless3> { static Table threadtable; public: mrcfft1d(unsigned int nx, unsigned int M, size_t istride, size_t ostride, size_t idist, size_t odist, double *in=NULL, Complex *out=NULL, unsigned int threads=maxthreads) : fftw(std::max((realsize(nx,in,out)-2)*istride+(M-1)*idist+2, 2*(nx/2*ostride+(M-1)*odist+1)),-1,threads,nx), fftwblock<double,fftw_complex> (nx,M,istride,ostride,idist,odist,(Complex *) in,out,threads) {} threaddata lookup(bool inplace, unsigned int threads) { return Lookup(threadtable,keytype3(nx,Q,R,threads,inplace)); } void store(bool inplace, const threaddata& data) { Store(threadtable,keytype3(nx,Q,R,data.threads,inplace),data); } void Normalize(Complex *out) { fftw::Normalize<Complex>(nx/2+1,M,ostride,odist,out); } void fftNormalized(double *in, Complex *out=NULL) { fftw::fftNormalized<double,Complex>(nx/2+1,M,ostride,odist,in,out,false); } void fft0Normalized(double *in, Complex *out=NULL) { fftw::fftNormalized<double,Complex>(nx/2+1,M,ostride,odist,in,out,true); } }; // Compute the real inverse Fourier transform of M complex vectors, each of // length n/2+1, corresponding to the non-negative parts of the frequency // spectra, using phase sign +1. Before calling fft(), the array in must be // allocated as Complex[M*(n/2+1)] and the array out must be allocated as // double[M*n]. The arrays in and out may coincide, // allocated as Complex[M*(n/2+1)]. // // Out-of-place usage (input destroyed): // // mcrfft1d Backward(n,M,istride,ostride,idist,odist,in,out); // Backward.fft(in,out); // // In-place usage: // // mcrfft1d Backward(n,M,istride,ostride,idist,odist); // Backward.fft(out); // // Notes: // stride is the spacing between the elements of each Complex vector; // dist is the spacing between the first elements of the vectors; // in contains the first n/2+1 Complex Fourier values; // out contains the n real values stored as a Complex array. 
// class mcrfft1d : public fftwblock<fftw_complex,double>, public Threadtable<keytype3,keyless3> { static Table threadtable; public: mcrfft1d(unsigned int nx, unsigned int M, size_t istride, size_t ostride, size_t idist, size_t odist, Complex *in=NULL, double *out=NULL, unsigned int threads=maxthreads) : fftw(std::max(2*(nx/2*istride+(M-1)*idist+1), (realsize(nx,in,out)-2)*ostride+(M-1)*odist+2),1,threads,nx), fftwblock<fftw_complex,double> (nx,M,istride,ostride,idist,odist,in,(Complex *) out,threads) {} threaddata lookup(bool inplace, unsigned int threads) { return Lookup(threadtable,keytype3(nx,Q,R,threads,inplace)); } void store(bool inplace, const threaddata& data) { Store(threadtable,keytype3(nx,Q,R,data.threads,inplace),data); } void Normalize(double *out) { fftw::Normalize<double>(nx,M,ostride,odist,out); } void fftNormalized(Complex *in, double *out=NULL) { fftw::fftNormalized<Complex,double>(nx,M,ostride,odist,in,out,false); } void fft0Normalized(Complex *in, double *out=NULL) { fftw::fftNormalized<Complex,double>(nx,M,ostride,odist,in,out,true); } }; // Compute the complex two-dimensional Fourier transform of nx times ny // complex values. Before calling fft(), the arrays in and out (which may // coincide) must be allocated as Complex[nx*ny]. // // Out-of-place usage: // // fft2d Forward(nx,ny,-1,in,out); // Forward.fft(in,out); // // fft2d Backward(nx,ny,1,in,out); // Backward.fft(in,out); // // fft2d Backward(nx,ny,1,in,out); // Backward.fftNormalized(in,out); // True inverse of Forward.fft(out,in); // // In-place usage: // // fft2d Forward(nx,ny,-1); // Forward.fft(in); // // fft2d Backward(nx,ny,1); // Backward.fft(in); // // Note: // in[ny*i+j] contains the ny Complex values for each i=0,...,nx-1. 
// class fft2d : public fftw, public Threadtable<keytype2,keyless2> { unsigned int nx; unsigned int ny; static Table threadtable; public: fft2d(unsigned int nx, unsigned int ny, int sign, Complex *in=NULL, Complex *out=NULL, unsigned int threads=maxthreads) : fftw(2*nx*ny,sign,threads), nx(nx), ny(ny) {Setup(in,out);} #ifdef __Array_h__ fft2d(int sign, const Array::array2<Complex>& in, const Array::array2<Complex>& out=Array::NULL2, unsigned int threads=maxthreads) : fftw(2*in.Size(),sign,threads), nx(in.Nx()), ny(in.Ny()) { Setup(in,out); } #endif threaddata lookup(bool inplace, unsigned int threads) { return this->Lookup(threadtable,keytype2(nx,ny,threads,inplace)); } void store(bool inplace, const threaddata& data) { this->Store(threadtable,keytype2(nx,ny,data.threads,inplace),data); } fftw_plan Plan(Complex *in, Complex *out) { return fftw_plan_dft_2d(nx,ny,(fftw_complex *) in,(fftw_complex *) out, sign,effort); } void Execute(Complex *in, Complex *out, bool=false) { fftw_execute_dft(plan,(fftw_complex *) in,(fftw_complex *) out); } }; // Compute the complex two-dimensional Fourier transform of nx times ny real // values, using phase sign -1. // Before calling fft(), the array in must be allocated as double[nx*ny] and // the array out must be allocated as Complex[nx*(ny/2+1)]. The arrays in // and out may coincide, allocated as Complex[nx*(ny/2+1)]. // // Out-of-place usage: // // rcfft2d Forward(nx,ny,in,out); // Forward.fft(in,out); // Origin of Fourier domain at (0,0) // Forward.fft0(in,out); // Origin of Fourier domain at (nx/2,0); // input destroyed. // // In-place usage: // // rcfft2d Forward(nx,ny); // Forward.fft(in); // Origin of Fourier domain at (0,0) // Forward.fft0(in); // Origin of Fourier domain at (nx/2,0) // // Notes: // in contains the nx*ny real values stored as a Complex array; // out contains the upper-half portion (ky >= 0) of the Complex transform. 
// class rcfft2d : public fftw { unsigned int nx; unsigned int ny; public: rcfft2d(unsigned int nx, unsigned int ny, Complex *out=NULL, unsigned int threads=maxthreads) : fftw(2*nx*(ny/2+1),-1,threads,nx*ny), nx(nx), ny(ny) {Setup(out);} rcfft2d(unsigned int nx, unsigned int ny, double *in, Complex *out=NULL, unsigned int threads=maxthreads) : fftw(2*nx*(ny/2+1),-1,threads,nx*ny), nx(nx), ny(ny) { Setup(in,out); } fftw_plan Plan(Complex *in, Complex *out) { return fftw_plan_dft_r2c_2d(nx,ny,(double *) in,(fftw_complex *) out, effort); } void Execute(Complex *in, Complex *out, bool shift=false) { if(shift) { if(inplace) Shift(in,nx,ny,threads); else Shift((double *) in,nx,ny,threads); } fftw_execute_dft_r2c(plan,(double *) in,(fftw_complex *) out); } // Set Nyquist modes of even shifted transforms to zero. void deNyquist(Complex *f) { unsigned int nyp=ny/2+1; if(nx % 2 == 0) #ifndef FFTWPP_SINGLE_THREAD #pragma omp parallel for num_threads(threads) #endif for(unsigned int j=0; j < nyp; ++j) f[j]=0.0; if(ny % 2 == 0) #ifndef FFTWPP_SINGLE_THREAD #pragma omp parallel for num_threads(threads) #endif for(unsigned int i=0; i < nx; ++i) f[(i+1)*nyp-1]=0.0; } }; // Compute the real two-dimensional inverse Fourier transform of the // nx*(ny/2+1) Complex values corresponding to the spectral values in the // half-plane ky >= 0, using phase sign +1. // Before calling fft(), the array in must be allocated as // Complex[nx*(ny/2+1)] and the array out must be allocated as // double[nx*ny]. The arrays in and out may coincide, // allocated as Complex[nx*(ny/2+1)]. 
// // Out-of-place usage (input destroyed): // // crfft2d Backward(nx,ny,in,out); // Backward.fft(in,out); // Origin of Fourier domain at (0,0) // Backward.fft0(in,out); // Origin of Fourier domain at (nx/2,0) // // In-place usage: // // crfft2d Backward(nx,ny); // Backward.fft(in); // Origin of Fourier domain at (0,0) // Backward.fft0(in); // Origin of Fourier domain at (nx/2,0) // // Notes: // in contains the upper-half portion (ky >= 0) of the Complex transform; // out contains the nx*ny real values stored as a Complex array. // class crfft2d : public fftw { unsigned int nx; unsigned int ny; public: crfft2d(unsigned int nx, unsigned int ny, double *out=NULL, unsigned int threads=maxthreads) : fftw(2*nx*(ny/2+1),1,threads,nx*ny), nx(nx), ny(ny) {Setup(out);} crfft2d(unsigned int nx, unsigned int ny, Complex *in, double *out=NULL, unsigned int threads=maxthreads) : fftw(nx*realsize(ny,in,out),1,threads,nx*ny), nx(nx), ny(ny) { Setup(in,out); } fftw_plan Plan(Complex *in, Complex *out) { return fftw_plan_dft_c2r_2d(nx,ny,(fftw_complex *) in,(double *) out, effort); } void Execute(Complex *in, Complex *out, bool shift=false) { fftw_execute_dft_c2r(plan,(fftw_complex *) in,(double *) out); if(shift) { if(inplace) Shift(out,nx,ny,threads); else Shift((double *) out,nx,ny,threads); } } // Set Nyquist modes of even shifted transforms to zero. void deNyquist(Complex *f) { unsigned int nyp=ny/2+1; if(nx % 2 == 0) #ifndef FFTWPP_SINGLE_THREAD #pragma omp parallel for num_threads(threads) #endif for(unsigned int j=0; j < nyp; ++j) f[j]=0.0; if(ny % 2 == 0) #ifndef FFTWPP_SINGLE_THREAD #pragma omp parallel for num_threads(threads) #endif for(unsigned int i=0; i < nx; ++i) f[(i+1)*nyp-1]=0.0; } }; // Compute the complex three-dimensional Fourier transform of // nx times ny times nz complex values. Before calling fft(), the arrays in // and out (which may coincide) must be allocated as Complex[nx*ny*nz]. 
// // Out-of-place usage: // // fft3d Forward(nx,ny,nz,-1,in,out); // Forward.fft(in,out); // // fft3d Backward(nx,ny,nz,1,in,out); // Backward.fft(in,out); // // fft3d Backward(nx,ny,nz,1,in,out); // Backward.fftNormalized(in,out); // True inverse of Forward.fft(out,in); // // In-place usage: // // fft3d Forward(nx,ny,nz,-1); // Forward.fft(in); // // fft3d Backward(nx,ny,nz,1); // Backward.fft(in); // // Note: // in[nz*(ny*i+j)+k] contains the (i,j,k)th Complex value, // indexed by i=0,...,nx-1, j=0,...,ny-1, and k=0,...,nz-1. // class fft3d : public fftw { unsigned int nx; unsigned int ny; unsigned int nz; public: fft3d(unsigned int nx, unsigned int ny, unsigned int nz, int sign, Complex *in=NULL, Complex *out=NULL, unsigned int threads=maxthreads) : fftw(2*nx*ny*nz,sign,threads), nx(nx), ny(ny), nz(nz) {Setup(in,out);} #ifdef __Array_h__ fft3d(int sign, const Array::array3<Complex>& in, const Array::array3<Complex>& out=Array::NULL3, unsigned int threads=maxthreads) : fftw(2*in.Size(),sign,threads), nx(in.Nx()), ny(in.Ny()), nz(in.Nz()) {Setup(in,out);} #endif fftw_plan Plan(Complex *in, Complex *out) { return fftw_plan_dft_3d(nx,ny,nz,(fftw_complex *) in, (fftw_complex *) out, sign, effort); } }; // Compute the complex two-dimensional Fourier transform of // nx times ny times nz real values, using phase sign -1. // Before calling fft(), the array in must be allocated as double[nx*ny*nz] // and the array out must be allocated as Complex[nx*ny*(nz/2+1)]. The // arrays in and out may coincide, allocated as Complex[nx*ny*(nz/2+1)]. 
//
// Out-of-place usage:
//
//   rcfft3d Forward(nx,ny,nz,in,out);
//   Forward.fft(in,out);  // Origin of Fourier domain at (0,0)
//   Forward.fft0(in,out); // Origin of Fourier domain at (nx/2,ny/2,0);
//                         // input destroyed
// In-place usage:
//
//   rcfft3d Forward(nx,ny,nz);
//   Forward.fft(in);      // Origin of Fourier domain at (0,0)
//   Forward.fft0(in);     // Origin of Fourier domain at (nx/2,ny/2,0)
//
// Notes:
//   in contains the nx*ny*nz real values stored as a Complex array;
//   out contains the upper-half portion (kz >= 0) of the Complex transform.
//
class rcfft3d : public fftw {
  unsigned int nx;
  unsigned int ny;
  unsigned int nz;
public:
  // In-place transform: the buffer serves as both input and output.
  rcfft3d(unsigned int nx, unsigned int ny, unsigned int nz,
          Complex *out=NULL, unsigned int threads=maxthreads)
    : fftw(2*nx*ny*(nz/2+1),-1,threads,nx*ny*nz), nx(nx), ny(ny), nz(nz) {
    Setup(out);
  }

  // Out-of-place transform from real in to Complex out.
  rcfft3d(unsigned int nx, unsigned int ny, unsigned int nz, double *in,
          Complex *out=NULL, unsigned int threads=maxthreads)
    : fftw(2*nx*ny*(nz/2+1),-1,threads,nx*ny*nz), nx(nx), ny(ny), nz(nz)
  {Setup(in,out);}

  fftw_plan Plan(Complex *in, Complex *out) {
    return fftw_plan_dft_r2c_3d(nx,ny,nz,(double *) in,(fftw_complex *) out,
                                effort);
  }

  // When shift is true, pre-shift the input so the Fourier origin lands at
  // (nx/2,ny/2,0) instead of (0,0).
  void Execute(Complex *in, Complex *out, bool shift=false) {
    if(shift) {
      if(inplace) Shift(in,nx,ny,nz,threads);
      else Shift((double *) in,nx,ny,nz,threads);
    }
    fftw_execute_dft_r2c(plan,(double *) in,(fftw_complex *) out);
  }

  // Set Nyquist modes of even shifted transforms to zero.
  void deNyquist(Complex *f) {
    unsigned int nzp=nz/2+1;
    unsigned int yz=ny*nzp;
    if(nx % 2 == 0) {
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
      for(unsigned int k=0; k < yz; ++k)
        f[k]=0.0;
    }
    if(ny % 2 == 0) {
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
      for(unsigned int i=0; i < nx; ++i) {
        unsigned int iyz=i*yz;
        for(unsigned int k=0; k < nzp; ++k)
          f[iyz+k]=0.0;
      }
    }
    if(nz % 2 == 0)
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
      for(unsigned int i=0; i < nx; ++i)
        for(unsigned int j=0; j < ny; ++j)
          f[i*yz+(j+1)*nzp-1]=0.0;
  }
};

// Compute the real three-dimensional inverse Fourier transform of the
// nx*ny*(nz/2+1) Complex values corresponding to the spectral values in the
// half-plane kz >= 0, using phase sign +1.
// Before calling fft(), the array in must be allocated as
// Complex[nx*ny*(nz/2+1)] and the array out must be allocated as
// double[nx*ny*nz].  The arrays in and out may coincide,
// allocated as Complex[nx*ny*(nz/2+1)].
//
// Out-of-place usage (input destroyed):
//
//   crfft3d Backward(nx,ny,nz,in,out);
//   Backward.fft(in,out);  // Origin of Fourier domain at (0,0)
//   Backward.fft0(in,out); // Origin of Fourier domain at (nx/2,ny/2,0)
//
// In-place usage:
//
//   crfft3d Backward(nx,ny,nz);
//   Backward.fft(in);      // Origin of Fourier domain at (0,0)
//   Backward.fft0(in);     // Origin of Fourier domain at (nx/2,ny/2,0)
//
// Notes:
//   in contains the upper-half portion (kz >= 0) of the Complex transform;
//   out contains the nx*ny*nz real values stored as a Complex array.
// class crfft3d : public fftw { unsigned int nx; unsigned int ny; unsigned int nz; public: crfft3d(unsigned int nx, unsigned int ny, unsigned int nz, double *out=NULL, unsigned int threads=maxthreads) : fftw(2*nx*ny*(nz/2+1),1,threads,nx*ny*nz), nx(nx), ny(ny), nz(nz) {Setup(out);} crfft3d(unsigned int nx, unsigned int ny, unsigned int nz, Complex *in, double *out=NULL, unsigned int threads=maxthreads) : fftw(nx*ny*(realsize(nz,in,out)),1,threads,nx*ny*nz), nx(nx), ny(ny), nz(nz) {Setup(in,out);} fftw_plan Plan(Complex *in, Complex *out) { return fftw_plan_dft_c2r_3d(nx,ny,nz,(fftw_complex *) in,(double *) out, effort); } void Execute(Complex *in, Complex *out, bool shift=false) { fftw_execute_dft_c2r(plan,(fftw_complex *) in,(double *) out); if(shift) { if(inplace) Shift(out,nx,ny,nz,threads); else Shift((double *) out,nx,ny,nz,threads); } } // Set Nyquist modes of even shifted transforms to zero. void deNyquist(Complex *f) { unsigned int nzp=nz/2+1; unsigned int yz=ny*nzp; if(nx % 2 == 0) { #ifndef FFTWPP_SINGLE_THREAD #pragma omp parallel for num_threads(threads) #endif for(unsigned int k=0; k < yz; ++k) f[k]=0.0; } if(ny % 2 == 0) { #ifndef FFTWPP_SINGLE_THREAD #pragma omp parallel for num_threads(threads) #endif for(unsigned int i=0; i < nx; ++i) { unsigned int iyz=i*yz; for(unsigned int k=0; k < nzp; ++k) f[iyz+k]=0.0; } } if(nz % 2 == 0) #ifndef FFTWPP_SINGLE_THREAD #pragma omp parallel for num_threads(threads) #endif for(unsigned int i=0; i < nx; ++i) for(unsigned int j=0; j < ny; ++j) f[i*yz+(j+1)*nzp-1]=0.0; } }; } #endif
cache.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distribute-cache-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const Quantum *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static const void *GetVirtualMetacontentFromCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *, ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,Quantum *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), OpenPixelCacheOnDisk(CacheInfo *,const MapMode), ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), ReadPixelCacheMetacontent(CacheInfo *magick_restrict, NexusInfo *magick_restrict,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict, ExceptionInfo *); static Quantum *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode, const ssize_t,const ssize_t,const size_t,const size_t, const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *) magick_hot_spot; #if defined(MAGICKCORE_OPENCL_SUPPORT) static void CopyOpenCLBuffer(CacheInfo *magick_restrict); #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. 
*/

/*
  Global declarations: module-wide pixel-cache state.
*/
static SemaphoreInfo
  *cache_semaphore = (SemaphoreInfo *) NULL;  /* serializes cache bookkeeping */

static ssize_t
  cache_anonymous_memory = (-1);  /* -1 until the anonymous-memory policy is probed */

static time_t
  cache_epoch = 0;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   A c q u i r e P i x e l C a c h e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquirePixelCache() acquires a pixel cache.
%
%  The format of the AcquirePixelCache() method is:
%
%      Cache AcquirePixelCache(const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o number_threads: the number of nexus threads.
%
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  /* Allocate and zero the cache descriptor; allocation failure is fatal. */
  cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->file=(-1);  /* no disk backing file yet */
  cache_info->id=GetMagickThreadId();
  /*
    Size the nexus pool: start from the caller's request, raise it to the
    OpenMP maximum and the ThreadResource limit, and never allow zero.
  */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    The synchronize flag may come from the environment or from policy;
    the policy value, when present, is applied last and wins.
  */
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  cache_info->width_limit=GetMagickResourceLimit(WidthResource);
  cache_info->height_limit=GetMagickResourceLimit(HeightResource);
  cache_info->semaphore=AcquireSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AcquireSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e P i x e l C a c h e N e x u s                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
%  The format of the AcquirePixelCacheNexus method is:
%
%      NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o number_threads: the number of nexus threads.
%
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  register ssize_t
    i;

  /*
    Allocate 2*number_threads pointers backed by one contiguous NexusInfo
    array: slots [0,number_threads) are the per-thread nexuses and slots
    [number_threads,2*number_threads) serve as their virtual nexuses.
  */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *nexus_info=(NexusInfo *) AcquireQuantumMemory(2*number_threads,
    sizeof(**nexus_info));
  if (*nexus_info == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    nexus_info[i]=(*nexus_info+i);
    if (i < (ssize_t) number_threads)
      nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i);
    nexus_info[i]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e P i x e l C a c h e P i x e l s                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquirePixelCachePixels() returns the pixels associated with the specified
%  image.
%
%  The format of the AcquirePixelCachePixels() method is:
%
%      void *AcquirePixelCachePixels(const Image *image,size_t *length,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o length: the pixel cache length.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  /*
    Expose the raw pixel store only for in-memory and memory-mapped caches;
    any other backing (disk, distributed, ping) yields NULL and length 0.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  (void) exception;
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  if ((info->type == MemoryCache) || (info->type == MapCache))
    {
      *length=(size_t) info->length;
      return(info->pixels);
    }
  *length=0;
  return((void *) NULL);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C a c h e C o m p o n e n t G e n e s i s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CacheComponentGenesis() instantiates the cache component.
%
%  The format of the CacheComponentGenesis method is:
%
%      MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  /* Lazily create the module-wide cache semaphore on first use. */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C a c h e C o m p o n e n t T e r m i n u s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CacheComponentTerminus() destroys the cache component.
%
%  The format of the CacheComponentTerminus() method is:
%
%      CacheComponentTerminus(void)
%
*/
MagickPrivate void CacheComponentTerminus(void)
{
  /*
    If genesis never ran, activate a semaphore first so the relinquish below
    always operates on a valid (if trivial) semaphore.
  */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  /* no op-- nothing to destroy */
  RelinquishSemaphoreInfo(&cache_semaphore);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l i p P i x e l C a c h e N e x u s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
%  mask.  The method returns MagickTrue if the pixel region is clipped,
%  otherwise MagickFalse.
%
%  The format of the ClipPixelCacheNexus() method is:
%
%      MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to clip.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  register Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* No write mask or an empty region: nothing to clip. */
  if ((image->channels & WriteMaskChannel) == 0)
    return(MagickTrue);
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /*
    p walks the authentic (stored) pixels fetched through the virtual nexus;
    q walks the staged pixels of this nexus being written back.
  */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      register ssize_t
        i;

      mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
      if (fabs(mask_alpha) >= MagickEpsilon)
        {
          /* Composite the stored pixel over the update for each updatable
             channel, weighted by the write-mask alpha. */
          for (i=0; i < (ssize_t) image->number_channels; i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
              GetPixelAlpha(image,p),(double) q[i],(double)
              GetPixelAlpha(image,q)));
          }
          SetPixelAlpha(image,GetPixelAlpha(image,p),q);
        }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o n e P i x e l C a c h e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelCache() clones a pixel cache.
%
%  The format of the ClonePixelCache() method is:
%
%      Cache ClonePixelCache(const Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
  const CacheInfo
    *magick_restrict source;

  CacheInfo
    *magick_restrict clone;

  /*
    Acquire a fresh cache with the same thread count and carry over only the
    virtual pixel method; pixel data is not copied here.
  */
  assert(cache != NULL);
  source=(const CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  if (source->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",source->filename);
  clone=(CacheInfo *) AcquirePixelCache(source->number_threads);
  clone->virtual_pixel_method=source->virtual_pixel_method;
  return((Cache) clone);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o n e P i x e l C a c h e M e t h o d s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelCacheMethods() clones the pixel cache methods from one cache to
%  another.
%
%  The format of the ClonePixelCacheMethods() method is:
%
%      void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
%  A description of each parameter follows:
%
%    o clone: Specifies a pointer to a Cache structure.
%
%    o cache: the pixel cache.
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict source_info;

  /*
    NOTE(review): despite its name, source_info is the DESTINATION (the clone)
    here; the methods are copied from cache into clone.
  */
  assert(clone != (Cache) NULL);
  source_info=(CacheInfo *) clone;
  assert(source_info->signature == MagickCoreSignature);
  if (source_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source_info->filename);
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  source_info->methods=cache_info->methods;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o n e P i x e l C a c h e R e p o s i t o r y                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelCacheRepository() clones the source pixel cache to the destination
%  cache.
%
%  The format of the ClonePixelCacheRepository() method is:
%
%      MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
%        CacheInfo *source_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o source_info: the source pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Helper: byte-copy one disk-backed cache file to another.  Both caches must
  already describe identical morphology; returns MagickFalse on any短 I/O.
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  /* Copy in chunks no larger than the source file (when its size is known). */
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  /* A partial copy (short read or short write) is a failure. */
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}

static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads  ((size_t) GetMagickResourceLimit(ThreadResource))
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    optimize,
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (cache_info->type == PingCache)
    return(MagickTrue);
  /*
    Fast path: identical morphology allows a raw memcpy (memory/map caches)
    or a direct file copy (disk caches).
  */
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->alpha_trait == clone_info->alpha_trait) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->number_channels == clone_info->number_channels) &&
      (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
      (cache_info->metacontent_extent == clone_info->metacontent_extent))
    {
      /*
        Identical pixel cache morphology.
      */
      if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
        {
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->number_channels*cache_info->columns*cache_info->rows*
            sizeof(*cache_info->pixels));
          if ((cache_info->metacontent_extent != 0) &&
              (clone_info->metacontent_extent != 0))
            (void) memcpy(clone_info->metacontent,cache_info->metacontent,
              cache_info->columns*cache_info->rows*
              clone_info->metacontent_extent*sizeof(unsigned char));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology: copy row by row, remapping channels.
  */
  cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
  clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  /* optimize: channel maps agree, so a row can be copied with one memcpy. */
  optimize=(cache_info->number_channels == clone_info->number_channels) &&
    (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
    MagickTrue : MagickFalse;
  length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
    clone_info->number_channels*clone_info->columns);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *pixels;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    if (y >= (ssize_t) clone_info->rows)
      continue;
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
      cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
      clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    /* Zero the destination row so channels absent from the source stay 0. */
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    if (optimize != MagickFalse)
      (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
        sizeof(Quantum));
    else
      {
        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        /*
          Mismatched pixel channel map.
        */
        p=cache_nexus[id]->pixels;
        q=clone_nexus[id]->pixels;
        for (x=0; x < (ssize_t) cache_info->columns; x++)
        {
          register ssize_t
            i;

          if (x == (ssize_t) clone_info->columns)
            break;
          for (i=0; i < (ssize_t) clone_info->number_channels; i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=clone_info->channel_map[i].channel;
            traits=cache_info->channel_map[channel].traits;
            if (traits != UndefinedPixelTrait)
              *q=*(p+cache_info->channel_map[channel].offset);
            q++;
          }
          p+=cache_info->number_channels;
        }
      }
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->metacontent_extent != 0) &&
      (clone_info->metacontent_extent != 0))
    {
      /*
        Clone metacontent.
      */
      length=(size_t) MagickMin(cache_info->metacontent_extent,
        clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        Quantum
          *pixels;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
          cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
          clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        if ((clone_nexus[id]->metacontent != (void *) NULL) &&
            (cache_nexus[id]->metacontent != (void *) NULL))
          (void) memcpy(clone_nexus[id]->metacontent,
            cache_nexus[id]->metacontent,length*sizeof(unsigned char));
        status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
      }
    }
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y I m a g e P i x e l C a c h e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
%  The format of the DestroyImagePixelCache() method is:
%
%      void DestroyImagePixelCache(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache != (void *) NULL)
    image->cache=DestroyPixelCache(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y I m a g e P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImagePixels() deallocates memory associated with the pixel cache.
%
%  The format of the DestroyImagePixels() method is:
%
%      void DestroyImagePixels(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Prefer an installed destroy handler (e.g. a delegate cache) if present. */
  if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL)
    {
      cache_info->methods.destroy_pixel_handler(image);
      return;
    }
  image->cache=DestroyPixelCache(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y P i x e l C a c h e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelCache() deallocates memory associated with the pixel cache.
%
%  The format of the DestroyPixelCache() method is:
%
%      Cache DestroyPixelCache(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/

/* Helper: close the cache's disk file (if open) and release its file slot. */
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    status;

  status=(-1);
  if (cache_info->file != -1)
    {
      status=close(cache_info->file);
      cache_info->file=(-1);
      RelinquishMagickResource(FileResource,1);
    }
  return(status == -1 ? MagickFalse : MagickTrue);
}

/*
  Helper: release the pixel storage of a cache according to its backing type
  and return the associated resource accounting.
*/
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* fallthrough -- NOTE(review): no break above, so a MapCache also runs
       the DiskCache teardown; presumably intentional since a memory-mapped
       cache is backed by a disk file that must be closed -- confirm. */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}

MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Reference counted: only the last release actually frees the cache.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  /* Invalidate the signature to catch use-after-destroy in debug builds. */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y P i x e l C a c h e N e x u s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
%  The format of the DestroyPixelCacheNexus() method is:
%
%      NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
%        const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o nexus_info: the nexus to destroy.
%
%    o number_threads: the number of nexus threads.
%
*/

/* Helper: free (or unmap) one nexus' staging buffer and reset its fields. */
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  if (nexus_info->mapped == MagickFalse)
    (void) RelinquishAlignedMemory(nexus_info->cache);
  else
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  nexus_info->cache=(Quantum *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}

MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    i;

  assert(nexus_info != (NexusInfo **) NULL);
  /*
    Mirror of AcquirePixelCacheNexus(): 2*number_threads slots (the second
    half are the virtual nexuses), one struct array plus one pointer array.
  */
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    if (nexus_info[i]->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus_info[i]);
    nexus_info[i]->signature=(~MagickCoreSignature);
  }
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c M e t a c o n t e n t                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticMetacontent() returns the authentic metacontent corresponding
%  with the last call to QueueAuthenticPixels() or GetVirtualPixels().  NULL is
%  returned if the associated pixels are not available.
%
%  The format of the GetAuthenticMetacontent() method is:
%
%      void *GetAuthenticMetacontent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Delegate to an installed handler when one is registered. */
  if (cache_info->methods.get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    {
      void
        *metacontent;

      metacontent=cache_info->methods.
        get_authentic_metacontent_from_handler(image);
      return(metacontent);
    }
  /* Otherwise return this thread's nexus metacontent. */
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticMetacontentFromCache() returns the meta-content corresponding
%  with the last call to QueueAuthenticPixelsCache() or
%  GetAuthenticPixelsCache().
%
%  The format of the GetAuthenticMetacontentFromCache() method is:
%
%      void *GetAuthenticMetacontentFromCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ static void *GetAuthenticMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL % operations. % % The format of the GetAuthenticOpenCLBuffer() method is: % % cl_mem GetAuthenticOpenCLBuffer(const Image *image, % MagickCLDevice device,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o device: the device to use. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  /* A shared or undefined cache must be synced/privatized before export. */
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *) image->cache;
    }
  /* Only a heap-resident (non-mapped) memory cache can back an OpenCL buffer. */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  /*
    Under the cache semaphore: rebuild the CL cache info when it belongs to a
    different device context, create it if absent, then retain the buffer so
    the caller owns a reference.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
    }
  if (cache_info->opencl != (MagickCLCacheInfo) NULL)
    RetainOpenCLMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return((cl_mem) NULL);
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t A u t h e n t i c P i x e l C a c h e N e x u s                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
%  disk pixel cache as defined by the geometry parameters.   A pointer to the
%  pixels is returned if the pixels are transferred, otherwise a NULL is
%  returned.
%
%  The format of the GetAuthenticPixelCacheNexus() method is:
%
%      Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to return.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict pixels;

  /*
    Transfer pixels from the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* If the nexus aliases the cache directly, no read-back is needed. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  if (cache_info->metacontent_extent != 0)
    if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse)
      return((Quantum *) NULL);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t A u t h e n t i c P i x e l s F r o m C a c h e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelsFromCache() returns the pixels associated with the last
%  call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
%  The format of the GetAuthenticPixelsFromCache() method is:
%
%      Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  /* Return the pixels staged in the calling thread's cache nexus. */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  return(info->nexus_info[thread_id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c P i x e l Q u e u e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelQueue() returns the authentic pixels associated
%  corresponding with the last call to QueueAuthenticPixels() or
%  GetAuthenticPixels().
%
%  The format of the GetAuthenticPixelQueue() method is:
%
%      Quantum *GetAuthenticPixelQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  GetAuthenticPixelsFromHandler
    handler;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  /* A registered handler (e.g. a delegate cache) takes precedence. */
  handler=info->methods.get_authentic_pixels_from_handler;
  if (handler != (GetAuthenticPixelsFromHandler) NULL)
    return(handler(image));
  /* Fall back to this thread's nexus staging buffer. */
  assert(thread_id < (int) info->number_threads);
  return(info->nexus_info[thread_id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixels() obtains a pixel region for read/write access.  If the
%  region is successfully accessed, a pointer to a Quantum array
%  representing the region is returned, otherwise NULL is returned.
%
%  The returned pointer may point to a temporary working copy of the pixels
%  or it may point to the original pixels in memory.  Performance is maximized
%  if the selected region is part of one row, or one or more full rows, since
%  then there is opportunity to access the pixels in-place (without a copy)
%  if the image is in memory, or in a memory-mapped file.  The returned pointer
%  must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image has corresponding metacontent,call
%  GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
%  meta-content corresponding to the region.  Once the Quantum array has
%  been updated, the changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
%  The format of the GetAuthenticPixels() method is:
%
%      Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  /*
    Prefer the registered handler; otherwise resolve the region through this
    thread's cache nexus.
  */
  if (info->methods.get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    return(info->methods.get_authentic_pixels_handler(image,x,y,columns,rows,
      exception));
  assert(thread_id < (int) info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    info->nexus_info[thread_id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t A u t h e n t i c P i x e l s C a c h e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
%  as defined by the geometry parameters.   A pointer to the pixels is returned
%  if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetAuthenticPixelsCache() method is:
%
%      Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  if (info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  /*
    Read the region through this thread's private cache nexus.
  */
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    info->nexus_info[thread_id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e E x t e n t                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageExtent() returns the extent of the pixels that correspond to the
%  last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
%  The format of the GetImageExtent() method is:
%
%      MagickSizeType GetImageExtent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* extent of this thread's nexus (or full image if the nexus is empty) */
  return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e P i x e l C a c h e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCache() ensures that there is only a single reference to the
%  pixel cache to be modified, updating the provided cache pointer to point to
%  a clone of the original pixel cache if necessary.
%
%  The format of the GetImagePixelCache method is:
%
%      Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone: any value other than MagickFalse clones the cache pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  Check that the image descriptor still agrees with its pixel cache
  (class, colorspace, geometry, channel map, metacontent).  Returns
  MagickFalse when the cache must be reopened.
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  const PixelChannelMap
    *magick_restrict p,
    *magick_restrict q;

  /*
    Does the image match the pixel cache morphology?
  */
  cache_info=(CacheInfo *) image->cache;
  p=image->channel_map;
  q=cache_info->channel_map;
  if ((image->storage_class != cache_info->storage_class) ||
      (image->colorspace != cache_info->colorspace) ||
      (image->alpha_trait != cache_info->alpha_trait) ||
      (image->channels != cache_info->channels) ||
      (image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows) ||
      (image->number_channels != cache_info->number_channels) ||
      (memcmp(p,q,image->number_channels*sizeof(*p)) != 0) ||
      (image->metacontent_extent != cache_info->metacontent_extent) ||
      (cache_info->nexus_info == (NexusInfo **) NULL))
    return(MagickFalse);
  return(MagickTrue);
}

/*
  Return a cache the caller may safely write to, cloning the pixels first
  (copy-on-write) when the cache is shared or opened read-only.  Also
  enforces the global CPU-throttle and time-limit resource policies.
  NOTE(review): cache_epoch is file-scope state defined outside this chunk;
  the static locals below are intentionally shared across calls.
*/
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  /* throttle: sleep every 32nd call when a throttle limit is configured */
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=GetMagickTime();
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
      /* time-limit exceeded: close any disk cache file, then abort */
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /* double-checked under the cache semaphore: clone when shared/read-only */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  /* adopt the clone; original cache destroyed below */
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      if (image->type != UndefinedType)
        image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e P i x e l C a c h e T y p e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
%  DiskCache, MemoryCache, MapCache, or PingCache.
%
%  The format of the GetImagePixelCacheType() method is:
%
%      CacheType GetImagePixelCacheType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e A u t h e n t i c P i x e l                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.
%
%  The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(const Image image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
% % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType CopyPixel(const Image *image, const Quantum *source,Quantum *destination) { register ssize_t i; if (source == (const Quantum *) NULL) { destination[RedPixelChannel]=ClampToQuantum(image->background_color.red); destination[GreenPixelChannel]=ClampToQuantum( image->background_color.green); destination[BluePixelChannel]=ClampToQuantum( image->background_color.blue); destination[BlackPixelChannel]=ClampToQuantum( image->background_color.black); destination[AlphaPixelChannel]=ClampToQuantum( image->background_color.alpha); return(MagickFalse); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); destination[channel]=source[i]; } return(MagickTrue); } MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; register Quantum *magick_restrict q; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); if (cache_info->methods.get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,pixel,exception)); q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e A u t h e n t i c P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y) % location. 
The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixelFromCache() method is: % % MagickBooleanType GetOneAuthenticPixelFromCache(const Image image, % const ssize_t x,const ssize_t y,Quantum *pixel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id], exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixel() returns a single virtual pixel at the specified % (x,y) location. The image background color is returned if an error occurs. % If you plan to modify the pixel, use GetOneAuthenticPixel() instead. % % The format of the GetOneVirtualPixel() method is: % % MagickBooleanType GetOneVirtualPixel(const Image image,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. 
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  /*
    A registered handler takes precedence; otherwise read the 1x1 region
    through this thread's nexus with the cache's virtual-pixel policy.
  */
  if (info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(thread_id < (int) info->number_threads);
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,info->nexus_info[thread_id],exception);
  return(CopyPixel(image,p,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t O n e V i r t u a l P i x e l F r o m C a c h e                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelFromCache() returns a single virtual pixel at the
%  specified (x,y) location.  The image background color is returned if an
%  error occurs.
%
%  The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image image,
%        const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
%        Quantum *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  /*
    Zero the result, then read a 1x1 virtual region through this thread's
    nexus; CopyPixel() substitutes the background color on failure.
  */
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  return(CopyPixel(image,p,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e V i r t u a l P i x e l I n f o                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.  If
%  you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualPixelInfo() method is:
%
%      MagickBooleanType GetOneVirtualPixelInfo(const Image image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelInfo *pixel,ExceptionInfo exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y: these values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  register const Quantum
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  /*
    Initialize the PixelInfo, then convert the 1x1 virtual region into it.
  */
  GetPixelInfo(image,pixel);
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  if (p == (const Quantum *) NULL)
    return(MagickFalse);
  GetPixelInfoPixel(image,p,pixel);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e C o l o r s p a c e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
%  The format of the GetPixelCacheColorspace() method is:
%
%      Colorspace GetPixelCacheColorspace(const Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
% */ MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheFilename() returns the filename associated with the pixel % cache. % % The format of the GetPixelCacheFilename() method is: % % const char *GetPixelCacheFilename(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const char *GetPixelCacheFilename(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->cache_filename); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheMethods() initializes the CacheMethods structure. % % The format of the GetPixelCacheMethods() method is: % % void GetPixelCacheMethods(CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache_methods: Specifies a pointer to a CacheMethods structure. 
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Populate the handler table with the default in-process cache
    implementations defined in this file.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /* virtual (read-only) pixel access */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  /* authentic (read/write) pixel access */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  /* queue/sync/teardown */
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e N e x u s E x t e n t                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheNexusExtent() returns the extent of the pixels associated
%  corresponding with the last call to SetPixelCacheNexusPixels() or
%  GetPixelCacheNexusPixels().
%
%  The format of the GetPixelCacheNexusExtent() method is:
%
%      MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict info;

  MagickSizeType
    extent;

  assert(cache != NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  /*
    An empty nexus region means the whole image extent.
  */
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  return(extent == 0 ? (MagickSizeType) info->columns*info->rows : extent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e P i x e l s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCachePixels() returns the pixels associated with the specified image.
%
%  The format of the GetPixelCachePixels() method is:
%
%      void *GetPixelCachePixels(Image *image,MagickSizeType *length,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o length: the pixel cache length.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); assert(length != (MagickSizeType *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=cache_info->length; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); return((void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheStorageClass() returns the class type of the pixel cache. % % The format of the GetPixelCacheStorageClass() method is: % % ClassType GetPixelCacheStorageClass(Cache cache) % % A description of each parameter follows: % % o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass. % % o cache: the pixel cache. % */ MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->storage_class); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e T i l e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheTileSize() returns the pixel cache tile size. 
%
%  The format of the GetPixelCacheTileSize() method is:
%
%      void GetPixelCacheTileSize(const Image *image,size_t *width,
%        size_t *height)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the optimized cache tile width in pixels.
%
%    o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict info;

  size_t
    row_bytes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  /*
    Square tiles sized from the per-pixel byte footprint; disk caches get a
    larger budget to amortize seek overhead.
  */
  row_bytes=MagickMax(info->number_channels,1)*sizeof(Quantum);
  *width=2048UL/row_bytes;
  if (GetImagePixelCacheType(image) == DiskCache)
    *width=8192UL/row_bytes;
  *height=(*width);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e V i r t u a l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
%  pixel cache.  A virtual pixel is any pixel access that is outside the
%  boundaries of the image cache.
%
%  The format of the GetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->virtual_pixel_method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromCache() returns the meta-content corresponding with % the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualMetacontentFromCache() method is: % % void *GetVirtualMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const void *GetVirtualMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromNexus() returns the meta-content for the specified % cache nexus. 
%
%  The format of the GetVirtualMetacontentFromNexus() method is:
%
%      const void *GetVirtualMetacontentFromNexus(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  /*
    An unopened cache has no meta-content to offer.
  */
  if (info->storage_class == UndefinedClass)
    return((void *) NULL);
  return(nexus_info->metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l M e t a c o n t e n t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualMetacontent() returns the virtual metacontent corresponding with
%  the last call to QueueAuthenticPixels() or GetVirtualPixels().  NULL is
%  returned if the meta-content are not available.
%
%  The format of the GetVirtualMetacontent() method is:
%
%      const void *GetVirtualMetacontent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  const void
    *magick_restrict metacontent;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  /*
    Try the registered handler first; fall back to this thread's nexus.
  */
  metacontent=info->methods.get_virtual_metacontent_from_handler(image);
  if (metacontent != (void *) NULL)
    return(metacontent);
  assert(thread_id < (int) info->number_threads);
  return(GetVirtualMetacontentFromNexus(info,info->nexus_info[thread_id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l C a c h e N e x u s                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
%  pixel cache as defined by the geometry parameters.   A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetVirtualPixelCacheNexus() method is:
%
%      Quantum *GetVirtualPixelCacheNexus(const Image *image,
%        const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
%        const size_t columns,const size_t rows,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to acquire.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  8x8 ordered-dither offset table consumed by DitherX()/DitherY(); values
  span 0..63 so that subtracting 32 yields a signed jitter of -32..+31.
*/
static ssize_t
  DitherMatrix[64] =
  {
     0,  48,  12,  60,   3,  51,  15,  63,
    32,  16,  44,  28,  35,  19,  47,  31,
     8,  56,   4,  52,  11,  59,   7,  55,
    40,  24,  36,  20,  43,  27,  39,  23,
     2,  50,  14,  62,   1,  49,  13,  61,
    34,  18,  46,  30,  33,  17,  45,  29,
    10,  58,   6,  54,   9,  57,   5,  53,
    42,  26,  38,  22,  41,  25,  37,  21
  };

/*
  DitherX(): jitter an x coordinate by the dither offset (indexed by the low
  3 bits of x, i.e. the first row of DitherMatrix) and clamp to [0,columns).
*/
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    index;

  index=x+DitherMatrix[x & 0x07]-32L;
  if (index < 0L)
    return(0L);
  if (index >= (ssize_t) columns)
    return((ssize_t) columns-1L);
  return(index);
}

/*
  DitherY(): as DitherX(), for the y coordinate clamped to [0,rows).
*/
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    index;

  index=y+DitherMatrix[y & 0x07]-32L;
  if (index < 0L)
    return(0L);
  if (index >= (ssize_t) rows)
    return((ssize_t) rows-1L);
  return(index);
}

/*
  EdgeX()/EdgeY(): clamp a coordinate to the nearest valid column/row
  (replicate-edge semantics).
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x < 0L)
    return(0L);
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x);
}

static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y < 0L)
    return(0L);
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y);
}

/*
  RandomX()/RandomY(): pick a uniformly random valid column/row.
*/
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  return((ssize_t) (columns*GetPseudoRandomValue(random_info)));
}

static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  return((ssize_t) (rows*GetPseudoRandomValue(random_info)));
}

/*
  VirtualPixelModulo(): floored division of offset by extent so that the
  remainder is always in [0,extent), even for negative offsets; used by the
  tile/mirror virtual pixel methods.
*/
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  modulo.quotient=offset/((ssize_t) extent);
  modulo.remainder=offset % ((ssize_t) extent);
  if ((modulo.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0))
    {
      modulo.quotient-=1;
      modulo.remainder+=((ssize_t) extent);
    }
  return(modulo);
}

MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    *magick_restrict virtual_nexus;

  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];

  register const Quantum
    *magick_restrict p;

  register const void
    *magick_restrict r;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i,
    u;

  register unsigned char
    *magick_restrict s;

  ssize_t
    v;

  void
    *magick_restrict virtual_metacontent;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  /*
    Stage the nexus buffer; masked images force a buffered (non-in-place)
    nexus so the mask can be applied later.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents.
  */
  virtual_nexus=nexus_info->virtual_nexus;
  s=(unsigned char *) nexus_info->metacontent;
  (void) memset(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  /*
    For constant-valued methods, precompute the single virtual pixel (and a
    zeroed metacontent buffer) that will be replicated for out-of-bounds
    coordinates.
  */
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
        }
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          /*
            BackgroundVirtualPixelMethod and the remaining constant methods
            use the image background color.
          */
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  /*
    Assemble the requested region row by row.  In-bounds spans are copied as
    runs; out-of-bounds coordinates are resolved one pixel at a time via a
    recursive 1x1 request through the virtual nexus.
  */
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      /*
        length is the in-bounds run starting at x_offset; it also advances
        the inner loop, so the single-pixel path below resets it to 1.
      */
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              /* nexus_info->virtual_nexus is the same nexus as
                 virtual_nexus (assigned above); other cases use the local
                 alias. */
              r=GetVirtualMetacontentFromNexus(cache_info,
                nexus_info->virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              /* Lazily create the RNG on first use. */
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* Odd tile quotient => reflect the coordinate. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /* Constant methods: replicate the precomputed pixel. */
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
            sizeof(*p)));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
        sizeof(*p)));
      q+=cache_info->number_channels*length;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          /* NOTE(review): this copies only `length` bytes while the pointer
             below advances by length*metacontent_extent — looks inconsistent
             with the single-pixel path above; confirm against the
             metacontent layout before changing. */
          (void) memcpy(s,r,(size_t) length);
          s+=length*cache_info->metacontent_extent;
        }
    }
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%+  G e t V i r t u a l P i x e l C a c h e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
%  cache as defined by the geometry parameters.
A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetVirtualPixelCache() method is:
%
%      const Quantum *GetVirtualPixelCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Thin wrapper: delegate the request to this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,
    rows,cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l P i x e l Q u e u e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels associated corresponding
%  with the last call to QueueAuthenticPixels() or GetVirtualPixels().
%
%  The format of the GetVirtualPixelQueue() method is:
%
%      const Quantum *GetVirtualPixelQueue(const Image image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  GetVirtualPixelsHandler
    handler;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    An installed handler overrides the default per-thread nexus lookup.
  */
  handler=cache_info->methods.get_virtual_pixels_handler;
  if (handler != (GetVirtualPixelsHandler) NULL)
    return(handler(image));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l P i x e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixels() returns an immutable pixel region.  If the
%  region is successfully accessed, a pointer to it is returned, otherwise
%  NULL is returned.  The returned pointer may point to a temporary working
%  copy of the pixels or it may point to the original pixels in memory.
%  Performance is maximized if the selected region is part of one row, or one
%  or more full rows, since there is opportunity to access the pixels in-place
%  (without a copy) if the image is in memory, or in a memory-mapped file.  The
%  returned pointer must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
%  access the meta-content (of type void) corresponding to the
%  region.
%
%  If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
%  Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
%  safe.  In a threaded environment, use GetCacheViewVirtualPixels() or
%  GetCacheViewAuthenticPixels() instead.
%
%  The format of the GetVirtualPixels() method is:
%
%      const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  GetVirtualPixelHandler
    handler;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    An installed virtual-pixel handler overrides the default per-thread
    nexus lookup; both paths resolve the image's virtual pixel method.
  */
  handler=cache_info->methods.get_virtual_pixel_handler;
  if (handler != (GetVirtualPixelHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,columns,rows,
      exception));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%+  G e t V i r t u a l P i x e l s F r o m C a c h e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsCache() returns the pixels associated corresponding with the
%  last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
%  The format of the GetVirtualPixelsCache() method is:
%
%      Quantum *GetVirtualPixelsCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ static const Quantum *GetVirtualPixelsCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsNexus() returns the pixels associated with the specified % cache nexus. % % The format of the GetVirtualPixelsNexus() method is: % % const Quantum *GetVirtualPixelsNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the colormap pixels. % */ MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((Quantum *) NULL); return((const Quantum *) nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a s k P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MaskPixelCacheNexus() masks the cache nexus as defined by the composite mask. % The method returns MagickTrue if the pixel region is masked, otherwise % MagickFalse. 
%
%  The format of the MaskPixelCacheNexus() method is:
%
%      MagickBooleanType MaskPixelCacheNexus(Image *image,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to clip.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  ApplyPixelCompositeMask(): blend authentic channel p over nexus channel q
  weighted by the composite-mask value (alpha) and the destination alpha
  (beta).  Fully opaque mask => return p unchanged.
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
  const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
  double
    mask_alpha;

  Quantum
    pixel;

  if (fabs(alpha-OpaqueAlpha) < MagickEpsilon)
    return(p);
  mask_alpha=1.0-QuantumScale*QuantumScale*alpha*beta;
  /* PerceptibleReciprocal guards against division by a near-zero alpha. */
  mask_alpha=PerceptibleReciprocal(mask_alpha);
  pixel=ClampToQuantum(mask_alpha*MagickOver_((double) p,alpha,(double) q,
    beta));
  return(pixel);
}

static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  register Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply composite mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* No composite mask channel, or an empty region: nothing to do. */
  if ((image->channels & CompositeMaskChannel) == 0)
    return(MagickTrue);
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /*
    p walks the authentic pixels for the region (read through the virtual
    nexus); q walks the nexus pixels that get masked in place.
  */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      register ssize_t
        i;

      mask_alpha=(double) GetPixelCompositeMask(image,p);
      for (i=0; i < (ssize_t) image->number_channels; i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* Only channels flagged for update participate in the blend. */
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],(MagickRealType)
          GetPixelAlpha(image,q));
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%+  O p e n P i x e l C a c h e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OpenPixelCache() allocates the pixel cache.  This includes defining the cache
%  dimensions, allocating space for the image pixels and optionally the
%  metacontent, and memory mapping the cache if it is disk based.  The cache
%  nexus array is initialized as well.
%
%  The format of the OpenPixelCache() method is:
%
%      MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* Try exclusive create first; fall back to opening the existing
           file for write. */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  /* Replace any previously open descriptor (e.g. mode change). */
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}

/*
  WritePixelCacheRegion(): write `length` bytes of `buffer` at `offset` in
  the cache file, retrying partial writes and EINTR; uses pwrite() where
  available so no seek is needed.  Returns the number of bytes written
  (which is less than `length` on a hard error), or -1 if the initial seek
  fails on the non-pwrite path.
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        /* Retry interrupted writes; abandon on any other error. */
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}

static MagickBooleanType SetPixelCacheExtent(Image
*image,MagickSizeType length) { CacheInfo *magick_restrict cache_info; MagickOffsetType count, extent, offset; cache_info=(CacheInfo *) image->cache; if (image->debug != MagickFalse) { char format[MagickPathExtent], message[MagickPathExtent]; (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format); (void) FormatLocaleString(message,MagickPathExtent, "extend %s (%s[%d], disk, %s)",cache_info->filename, cache_info->cache_filename,cache_info->file,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } if (length != (MagickSizeType) ((MagickOffsetType) length)) return(MagickFalse); offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END); if (offset < 0) return(MagickFalse); if ((MagickSizeType) offset >= length) count=(MagickOffsetType) 1; else { extent=(MagickOffsetType) length-1; count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *) ""); if (count != 1) return(MagickFalse); #if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE) if (cache_info->synchronize != MagickFalse) if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0) return(MagickFalse); #endif } offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET); if (offset < 0) return(MagickFalse); return(MagickTrue); } static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info, source_info; char format[MagickPathExtent], message[MagickPathExtent]; const char *hosts, *type; MagickBooleanType status; MagickSizeType length, number_pixels; size_t columns, packet_size; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (cache_anonymous_memory < 0) { char *value; /* Does the security policy require anonymous mapping for pixel cache? 
*/ cache_anonymous_memory=0; value=GetPolicyValue("pixel-cache-memory"); if (value == (char *) NULL) value=GetPolicyValue("cache:memory-map"); if (LocaleCompare(value,"anonymous") == 0) { #if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS) cache_anonymous_memory=1; #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"DelegateLibrarySupportNotBuiltIn", "'%s' (policy requires anonymous memory mapping)",image->filename); #endif } value=DestroyString(value); } if ((image->columns == 0) || (image->rows == 0)) ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (((MagickSizeType) image->columns > cache_info->width_limit) || ((MagickSizeType) image->rows > cache_info->height_limit)) ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit", image->filename); length=GetImageListLength(image); if (AcquireMagickResource(ListLengthResource,length) == MagickFalse) ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit", image->filename); source_info=(*cache_info); source_info.file=(-1); (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]", image->filename,(double) image->scene); cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->alpha_trait=image->alpha_trait; cache_info->channels=image->channels; cache_info->rows=image->rows; cache_info->columns=image->columns; InitializePixelChannelMap(image); cache_info->number_channels=GetPixelChannels(image); (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels* sizeof(*image->channel_map)); cache_info->metacontent_extent=image->metacontent_extent; cache_info->mode=mode; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; packet_size=cache_info->number_channels*sizeof(Quantum); if (image->metacontent_extent != 0) packet_size+=cache_info->metacontent_extent; 
length=number_pixels*packet_size; columns=(size_t) (length/cache_info->rows/packet_size); if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) || ((ssize_t) cache_info->rows < 0)) ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed", image->filename); cache_info->length=length; if (image->ping != MagickFalse) { cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->type=PingCache; return(MagickTrue); } status=AcquireMagickResource(AreaResource,(MagickSizeType) cache_info->columns*cache_info->rows); if (cache_info->mode == PersistMode) status=MagickFalse; length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length)) && ((cache_info->type == UndefinedCache) || (cache_info->type == MemoryCache))) { status=AcquireMagickResource(MemoryResource,cache_info->length); if (status != MagickFalse) { status=MagickTrue; if (cache_anonymous_memory <= 0) { cache_info->mapped=MagickFalse; cache_info->pixels=(Quantum *) MagickAssumeAligned( AcquireAlignedMemory(1,(size_t) cache_info->length)); } else { cache_info->mapped=MagickTrue; cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t) cache_info->length); } if (cache_info->pixels == (Quantum *) NULL) { cache_info->mapped=source_info.mapped; cache_info->pixels=source_info.pixels; } else { /* Create memory pixel cache. 
*/ cache_info->type=MemoryCache; cache_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) cache_info->metacontent=(void *) (cache_info->pixels+ cache_info->number_channels*number_pixels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } cache_info->storage_class=image->storage_class; if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } } status=AcquireMagickResource(DiskResource,cache_info->length); hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts", exception); if ((status == MagickFalse) && (hosts != (const char *) NULL)) { DistributeCacheInfo *server_info; /* Distribute the pixel cache to a remote server. */ server_info=AcquireDistributeCacheInfo(exception); if (server_info != (DistributeCacheInfo *) NULL) { status=OpenDistributePixelCache(server_info,image); if (status == MagickFalse) { ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", GetDistributeCacheHostname(server_info)); server_info=DestroyDistributeCacheInfo(server_info); } else { /* Create a distributed pixel cache. 
*/ status=MagickTrue; cache_info->type=DistributedCache; cache_info->server_info=server_info; (void) FormatLocaleString(cache_info->cache_filename, MagickPathExtent,"%s:%d",GetDistributeCacheHostname( (DistributeCacheInfo *) cache_info->server_info), GetDistributeCachePort((DistributeCacheInfo *) cache_info->server_info)); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, GetDistributeCacheFile((DistributeCacheInfo *) cache_info->server_info),type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } cache_info->type=UndefinedCache; (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } /* Create pixel cache on disk. 
*/ if (status == MagickFalse) { cache_info->type=UndefinedCache; (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) && (cache_info->mode != PersistMode)) { (void) ClosePixelCacheOnDisk(cache_info); *cache_info->cache_filename='\0'; } if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse) { cache_info->type=UndefinedCache; ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", image->filename); return(MagickFalse); } status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+ cache_info->length); if (status == MagickFalse) { cache_info->type=UndefinedCache; ThrowFileException(exception,CacheError,"UnableToExtendCache", image->filename); return(MagickFalse); } length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if (length != (MagickSizeType) ((size_t) length)) cache_info->type=DiskCache; else { status=AcquireMagickResource(MapResource,cache_info->length); if (status == MagickFalse) cache_info->type=DiskCache; else if ((cache_info->type != MapCache) && (cache_info->type != MemoryCache)) { cache_info->type=DiskCache; RelinquishMagickResource(MapResource,cache_info->length); } else { cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode, cache_info->offset,(size_t) cache_info->length); if (cache_info->pixels == (Quantum *) NULL) { cache_info->type=DiskCache; cache_info->mapped=source_info.mapped; cache_info->pixels=source_info.pixels; RelinquishMagickResource(MapResource,cache_info->length); } else { /* Create file-backed memory-mapped pixel cache. 
*/ (void) ClosePixelCacheOnDisk(cache_info); cache_info->type=MapCache; cache_info->mapped=MagickTrue; cache_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) cache_info->metacontent=(void *) (cache_info->pixels+ cache_info->number_channels*number_pixels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, cache_info->file,type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } } status=MagickTrue; if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info,exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename, cache_info->cache_filename,cache_info->file,type,(double) cache_info->columns,(double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r s i s t P i x e l C a c h e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PersistPixelCache() attaches to or initializes a persistent pixel cache.  A
%  persistent pixel cache is one that resides on disk and is not destroyed
%  when the program exits.
%
%  The format of the PersistPixelCache() method is:
%
%      MagickBooleanType PersistPixelCache(Image *image,const char *filename,
%        const MagickBooleanType attach,MagickOffsetType *offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o filename: the persistent pixel cache filename.
%
%    o attach: a value other than zero attaches to an existing persistent
%      pixel cache at *offset; zero clones this image's cache into a new
%      persistent cache at *offset.
%
%    o offset: the offset in the persistent cache to store pixels; on
%      success it is advanced past this image's cache, rounded up to the
%      next page boundary.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache: point this image's cache at
        the on-disk file and memory-map it via OpenPixelCache().
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Advance the caller's offset past this cache; note this always adds at
        least one byte of padding and a full extra page when length is already
        page-aligned.
      */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache: copy this image's cache repository into a
    new disk-backed cache at *offset.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  /*
    Same page-boundary advance as the attach path (see note above).
  */
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates an
region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelCacheNexus() method is:
%
%      Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        const MagickBooleanType clone,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to set.
%
%    o clone: clone the pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  /*
    Validate pixel cache geometry: the requested region's origin must lie
    inside the cache and the full region must not run past the last pixel.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);  /* signed overflow guard on y*columns+x */
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /*
    Advance offset to the region's last pixel and reject regions whose far
    corner falls outside the cache.
  */
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache; a write mask or composite mask forces a buffered
    (staging) nexus rather than direct in-place access.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e A u t h e n t i c P i x e l s C a c h e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates a region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelsCache() method is:
%
%      Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /* per-thread nexus selection: each OpenMP thread gets its own nexus */
  const int
    id = GetOpenMPThreadId();

  Quantum
    *magick_restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u e u e A u t h e n t i c P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixels() queues a mutable pixel region.  If the region is
%  successfully initialized a pointer to a Quantum array representing the
%  region is returned, otherwise NULL is returned.  The returned pointer may
%  point to a temporary working buffer for the pixels or it may point to the
%  final location of the pixels in memory.
%
%  Write-only access means that any existing pixel values corresponding to
%  the region are ignored.  This is useful if the initial image is being
%  created from scratch, or if the existing pixel values are to be
%  completely replaced without need to refer to their pre-existing values.
%  The application is free to read and write the pixel buffer returned by
%  QueueAuthenticPixels() any way it pleases.  QueueAuthenticPixels() does not
%  initialize the pixel array values.  Initializing pixel array values is the
%  application's responsibility.
%
%  Performance is maximized if the selected region is part of one row, or
%  one or more full rows, since then there is opportunity to access the
%  pixels in-place (without a copy) if the image is in memory, or in a
%  memory-mapped file.  The returned pointer must *never* be deallocated
%  by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
%  obtain the meta-content (of type void) corresponding to the region.
%  Once the Quantum (and/or meta-content) array has been updated, the
%  changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
%  The format of the QueueAuthenticPixels() method is:
%
%      Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /* per-thread nexus selection: each OpenMP thread gets its own nexus */
  const int
    id = GetOpenMPThreadId();

  Quantum
    *magick_restrict pixels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Delegate to a custom handler when one has been installed via
    SetPixelCacheMethods(); otherwise use the default nexus path.
  */
  if (cache_info->methods.queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    {
      pixels=cache_info->methods.queue_authentic_pixels_handler(image,x,y,
        columns,rows,exception);
      return(pixels);
    }
  assert(id < (int) cache_info->number_threads);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d P i x e l C a c h e M e t a c o n t e n t                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCacheMetacontent() reads metacontent from the specified region of
%  the pixel cache.
%
%  The format of the ReadPixelCacheMetacontent() method is:
%
%      MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the metacontent.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Read `length` bytes at `offset` from the cache file into `buffer`,
  retrying on EINTR and short reads.  Returns the number of bytes actually
  read (which callers compare against `length`), or -1 when the initial
  lseek fails on platforms without pread().
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        /* EOF or hard error ends the loop; EINTR retries the read */
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}

static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register ssize_t
    y;

  register unsigned char
    *magick_restrict q;

  size_t
    rows;

  /* nothing to do when the cache carries no metacontent */
  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* the nexus already points directly into the cache: no copy needed */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict p;

      /*
        Read meta-content from memory.  A full-width region is copied as one
        contiguous run instead of row by row.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk; file access is serialized by
        file_semaphore.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /*
        On disk the metacontent plane follows the pixel plane; extent is
        repurposed here as the pixel count of the whole cache.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache, one row (or one full-width
        run) per server request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* a short read above left y < rows: report failure */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d P i x e l C a c h e P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCachePixels() reads pixels from the specified region of the pixel
%  cache.
%
%  The format of the ReadPixelCachePixels() method is:
%
%      MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register Quantum
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    number_channels,
    rows;

  /* the nexus already points directly into the cache: no copy needed */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  /* overflow check: y*columns must divide back to y */
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  /* overflow check on the per-row byte length */
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  /* overflow check on the total byte extent */
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict p;

      /*
        Read pixels from memory.  A full-width region is copied as one
        contiguous run instead of row by row.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk; file access is serialized by file_semaphore.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache, one row (or one full-width run)
        per server request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* a short read above left y < rows: report failure */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e f e r e n c e P i x e l C a c h e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReferencePixelCache() increments the reference count associated with the
%  pixel cache returning a pointer to the cache.
%
%  The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache_info)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* increment under the cache semaphore so concurrent refs stay consistent */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t P i x e l C a c h e C h a n n e l s                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetPixelCacheChannels() resets the pixel cache channels.
%
%  The format of the ResetPixelCacheChannels method is:
%
%      void ResetPixelCacheChannels(Image *)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* re-derive the channel count from the image's current channel layout */
  cache_info->number_channels=GetPixelChannels(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t C a c h e A n o n y m o u s M e m o r y                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetCacheAnonymousMemory() resets the cache_anonymous_memory value.
%
%  The format of the ResetCacheAnonymousMemory method is:
%
%      void ResetCacheAnonymousMemory(void)
%
*/
MagickPrivate void ResetCacheAnonymousMemory(void)
{
  /* 0 selects heap allocation over anonymous-mapped memory for new caches */
  cache_anonymous_memory=0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t P i x e l C a c h e E p o c h                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetPixelCacheEpoch() resets the pixel cache epoch.
%
%  The format of the ResetPixelCacheEpoch method is:
%
%      void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  cache_epoch=0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t P i x e l C a c h e M e t h o d s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
%  The format of the SetPixelCacheMethods() method is:
%
%      SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods: for each handler, install the caller-supplied
    method only when it is non-NULL, leaving the existing method in place
    otherwise.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    FIX: test the CANDIDATE handler from cache_methods, not the handler
    already installed in cache_info->methods.  The previous code read
    cache_info->methods.get_one_virtual_pixel_from_handler, so a new
    handler was never installed when none was currently set, and a NULL
    candidate could overwrite an installed handler.  This now matches the
    get_one_authentic_pixel_from_handler logic below and the pattern used
    by every other handler above.
  */
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t P i x e l C a c h e N e x u s P i x e l s                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheNexusPixels() defines the region of the cache for the
%  specified cache nexus.
%
%  The format of the SetPixelCacheNexusPixels() method is:
%
%      Quantum SetPixelCacheNexusPixels(
%        const CacheInfo *magick_restrict cache_info,const MapMode mode,
%        const ssize_t x,const ssize_t y,const size_t width,const size_t height,
%        const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o x,y,width,height: define the region of this particular cache nexus.
%
%    o buffered: if true, nexus pixels are buffered.
%
%    o nexus_info: the cache nexus to set.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  AcquireCacheNexusPixels(): allocate the nexus staging buffer of `length`
  bytes, either from the heap (zero-filled) or as an anonymous memory map
  when cache_anonymous_memory is enabled.  Returns MagickFalse if `length`
  does not fit in a size_t or the allocation fails.
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /* Reject lengths that overflow size_t on this platform. */
  if (length != (MagickSizeType) ((size_t) length))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory <= 0)
    {
      /* Heap path: aligned allocation, zero-initialized. */
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  else
    {
      /* Anonymous-mmap path; mapped flag drives the matching release. */
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (Quantum *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}

/*
  PrefetchPixelCacheNexusPixels(): hint the CPU to prefetch the nexus buffer
  for read (ReadMode) or write (any other mode); no-op for tiny buffers.
*/
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  if (mode == ReadMode)
    {
      MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,
        0,1);
      return;
    }
  MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,1,1);
}

static Quantum *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        Alias directly into the cache only when the requested region is
        contiguous in memory: full-width rows, or a single row fully inside
        one scanline.
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  number_pixels=(MagickSizeType) width*height;
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  status=MagickTrue;
  /* Reuse the existing staging buffer when it is already large enough. */
  if (nexus_info->cache == (Quantum *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    return((Quantum *) NULL);
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  /* Metacontent is packed immediately after the pixel data. */
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+
      cache_info->number_channels*number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t P i x e l C a c h e V i r t u a l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
%  pixel cache and returns the previous setting.  A virtual pixel is any pixel
%  access that is outside the boundaries of the image cache.
%
%  The format of the SetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
%        const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: choose the type of virtual pixel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  SetCacheAlphaChannel(): mark the image as blended and set every pixel's
  alpha channel to `alpha`, row by row through a cache view.
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);  /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Remember the old method so it can be returned to the caller. */
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* Background color may require enabling alpha or promoting to sRGB. */
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}

#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c A u t h e n t i c O p e n C L B u f f e r                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
%  been completed and updates the host memory.
%
%  The format of the SyncAuthenticOpenCLBuffer() method is:
%
%      void SyncAuthenticOpenCLBuffer(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/

static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /* Only a memory cache with an active OpenCL buffer needs syncing. */
  if ((cache_info->type != MemoryCache) ||
      (cache_info->opencl == (MagickCLCacheInfo) NULL))
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}

MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  cache_info=(CacheInfo *) image->cache;
  CopyOpenCLBuffer(cache_info);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c A u t h e n t i c P i x e l C a c h e N e x u s                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
%  in-memory or disk cache.  The method returns MagickTrue if the pixel region
%  is synced, otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelCacheNexus() method is:
%
%      MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to sync.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /* Apply write/composite masks before the pixels reach the cache. */
  if (image->mask_trait != UpdatePixelTrait)
    {
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  /* Direct-aliased nexus: pixels are already in the cache; just taint. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) ==
       MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c A u t h e n t i c P i x e l C a c h e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
%  or disk cache.  The method returns MagickTrue if the pixel region is synced,
%  otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelsCache() method is:
%
%      MagickBooleanType SyncAuthenticPixelsCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncAuthenticPixelsCache(): sync the calling thread's cache nexus back to
  the pixel cache.  Each OpenMP thread owns its own nexus, indexed by thread
  id; the heavy lifting is delegated to SyncAuthenticPixelCacheNexus().
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c A u t h e n t i c P i x e l s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
%  The method returns MagickTrue if the pixel region is flushed, otherwise
%  MagickFalse.
%
%  The format of the SyncAuthenticPixels() method is:
%
%      MagickBooleanType SyncAuthenticPixels(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* A registered handler (e.g. a coder override) takes precedence. */
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    {
      status=cache_info->methods.sync_authentic_pixels_handler(image,
        exception);
      return(status);
    }
  assert(id < (int) cache_info->number_threads);
  status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c I m a g e P i x e l C a c h e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
%  The method returns MagickTrue if the pixel region is flushed, otherwise
%  MagickFalse.
%
%  The format of the SyncImagePixelCache() method is:
%
%      MagickBooleanType SyncImagePixelCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  /* GetImagePixelCache() does the actual flush/validation work. */
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e P i x e l C a c h e M e t a c o n t e n t                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCacheMetacontent() writes the meta-content to the specified region
%  of the pixel cache.
%
%  The format of the WritePixelCacheMetacontent() method is:
%
%      MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the meta-content.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const unsigned char
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* Direct-aliased nexus already lives in the cache; nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /* offset: element offset of the region's top-left corner; length: bytes
     per region row; extent: bytes for the whole region. */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.
      */
      /* Full-width region that fits in size_t: collapse to one memcpy. */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* On disk, metacontent is stored after all pixel data; extent is
         repurposed here as the total pixel count of the cache. */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      /* Send row by row unless the whole region fits in one request. */
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* Loop broke early => a row failed to write. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e C a c h e P i x e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCachePixels() writes image pixels to the specified region of the
%  pixel cache.
%
%  The format of the WritePixelCachePixels() method is:
%
%      MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const Quantum
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /* Direct-aliased nexus already lives in the cache; nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /* offset: element offset of the region's top-left corner; length: bytes
     per region row; extent: bytes for the whole region. */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.
      */
      /* Full-width region that fits in size_t: collapse to one memcpy. */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      /* Send row by row unless the whole region fits in one request. */
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* Loop broke early => a row failed to write. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
UsefulFunctions.c
// ------------------------------------------------------------------------------------- // Taken from COSMOLOGY.H // ------------------------------------------------------------------------------------- #define Ho (double) (cosmo_params_ufunc->hlittle*3.2407e-18) // s^-1 at z=0 #define RHOcrit (double) ( (3.0*Ho*Ho / (8.0*PI*G)) * (CMperMPC*CMperMPC*CMperMPC)/Msun) // Msun Mpc^-3 ---- at z=0 #define RHOcrit_cgs (double) (3.0*Ho*Ho / (8.0*PI*G)) // g pcm^-3 ---- at z=0 #define No (double) (RHOcrit_cgs*cosmo_params_ufunc->OMb*(1-global_params.Y_He)/m_p) // current hydrogen number density estimate (#/cm^3) ~1.92e-7 #define He_No (double) (RHOcrit_cgs*cosmo_params_ufunc->OMb*global_params.Y_He/(4.0*m_p)) // current helium number density estimate #define N_b0 (double) (No+He_No) // present-day baryon num density, H + He #define f_H (double) (No/(No+He_No)) // hydrogen number fraction #define f_He (double) (He_No/(No+He_No)) // helium number fraction struct CosmoParams *cosmo_params_ufunc; struct UserParams *user_params_ufunc; void Broadcast_struct_global_UF(struct UserParams *user_params, struct CosmoParams *cosmo_params){ cosmo_params_ufunc = cosmo_params; user_params_ufunc = user_params; } float ComputeFullyIoinizedTemperature(float z_re, float z, float delta){ // z_re: the redshift of reionization // z: the current redshift // delta:the density contrast float result, delta_re; // just be fully ionized if (fabs(z - z_re) < 1e-4) result = 1; else{ // linearly extrapolate to get density at reionization delta_re = delta * (1. + z ) / (1. + z_re); if (delta_re<=-1) delta_re=-1. + global_params.MIN_DENSITY_LOW_LIMIT; // evolving ionized box eq. 6 of McQuinn 2015, ignored the dependency of density at ionization if (delta<=-1) delta=-1. + global_params.MIN_DENSITY_LOW_LIMIT; result = pow((1. + delta) / (1. + delta_re), 1.1333); result *= pow((1. + z) / (1. + z_re), 3.4); result *= expf(pow((1. + z)/7.1, 2.5) - pow((1. 
+ z_re)/7.1, 2.5)); } result *= pow(global_params.T_RE, 1.7); // 1e4 before helium reionization; double it after result += pow(1e4 * ((1. + z)/4.), 1.7) * ( 1 + delta); result = pow(result, 0.5882); //LOG_DEBUG("z_re=%.4f, z=%.4f, delta=%e, Tk=%.f", z_re, z, delta, result); return result; } float ComputePartiallyIoinizedTemperature(float T_HI, float res_xH){ if (res_xH<=0.) return global_params.T_RE; if (res_xH>=1) return T_HI; return T_HI * res_xH + global_params.T_RE * (1. - res_xH); } void filter_box(fftwf_complex *box, int RES, int filter_type, float R){ int n_x, n_z, n_y, dimension,midpoint; float k_x, k_y, k_z, k_mag, kR; switch(RES) { case 0: dimension = user_params_ufunc->DIM; midpoint = MIDDLE; break; case 1: dimension = user_params_ufunc->HII_DIM; midpoint = HII_MIDDLE; break; } // loop through k-box #pragma omp parallel shared(box) private(n_x,n_y,n_z,k_x,k_y,k_z,k_mag,kR) num_threads(user_params_ufunc->N_THREADS) { #pragma omp for for (n_x=0; n_x<dimension; n_x++){ // for (n_x=dimension; n_x--;){ if (n_x>midpoint) {k_x =(n_x-dimension) * DELTA_K;} else {k_x = n_x * DELTA_K;} for (n_y=0; n_y<dimension; n_y++){ // for (n_y=dimension; n_y--;){ if (n_y>midpoint) {k_y =(n_y-dimension) * DELTA_K;} else {k_y = n_y * DELTA_K;} // for (n_z=(midpoint+1); n_z--;){ for (n_z=0; n_z<=midpoint; n_z++){ k_z = n_z * DELTA_K; if (filter_type == 0){ // real space top-hat k_mag = sqrt(k_x*k_x + k_y*k_y + k_z*k_z); kR = k_mag*R; // real space top-hat if (kR > 1e-4){ if(RES==1) { box[HII_C_INDEX(n_x, n_y, n_z)] *= 3.0*pow(kR, -3) * (sin(kR) - cos(kR)*kR); } if(RES==0) { box[C_INDEX(n_x, n_y, n_z)] *= 3.0*pow(kR, -3) * (sin(kR) - cos(kR)*kR); } } } else if (filter_type == 1){ // k-space top hat // This is actually (kR^2) but since we zero the value and find kR > 1 this is more computationally efficient // as we don't need to evaluate the slower sqrt function // kR = 0.17103765852*( k_x*k_x + k_y*k_y + k_z*k_z )*R*R; k_mag = sqrt(k_x*k_x + k_y*k_y + k_z*k_z); kR = k_mag*R; // 
real space top-hat kR *= 0.413566994; // equates integrated volume to the real space top-hat (9pi/2)^(-1/3) if (kR > 1){ if(RES==1) { box[HII_C_INDEX(n_x, n_y, n_z)] = 0; } if(RES==0) { box[C_INDEX(n_x, n_y, n_z)] = 0; } } } else if (filter_type == 2){ // gaussian // This is actually (kR^2) but since we zero the value and find kR > 1 this is more computationally efficient // as we don't need to evaluate the slower sqrt function kR = 0.643*0.643*( k_x*k_x + k_y*k_y + k_z*k_z )*R*R; // kR *= 0.643; // equates integrated volume to the real space top-hat if(RES==1) { box[HII_C_INDEX(n_x, n_y, n_z)] *= pow(E, -kR/2.0); } if(RES==0) { box[C_INDEX(n_x, n_y, n_z)] *= pow(E, -kR/2.0); } } else{ if ( (n_x==0) && (n_y==0) && (n_z==0) ) LOG_WARNING("Filter type %i is undefined. Box is unfiltered.", filter_type); } } } } // end looping through k box } return; } double MtoR(double M); double RtoM(double R); double TtoM(double z, double T, double mu); double dicke(double z); double dtdz(float z); double ddickedt(double z); double omega_mz(float z); double Deltac_nonlinear(float z); double drdz(float z); /* comoving distance, (1+z)*C*dtdz(in cm) per unit z */ double alpha_A(double T); /* returns the case B hydrogen recombination coefficient (Spitzer 1978) in cm^3 s^-1*/ double alpha_B(double T); double HeI_ion_crosssec(double nu); double HeII_ion_crosssec(double nu); double HI_ion_crosssec(double nu); /* R in Mpc, M in Msun */ double MtoR(double M){ // set R according to M<->R conversion defined by the filter type in ../Parameter_files/COSMOLOGY.H if (global_params.FILTER == 0) //top hat M = (4/3) PI <rho> R^3 return pow(3*M/(4*PI*cosmo_params_ufunc->OMm*RHOcrit), 1.0/3.0); else if (global_params.FILTER == 1) //gaussian: M = (2PI)^1.5 <rho> R^3 return pow( M/(pow(2*PI, 1.5) * cosmo_params_ufunc->OMm * RHOcrit), 1.0/3.0 ); else // filter not defined LOG_ERROR("No such filter = %i. 
Results are bogus.", global_params.FILTER); Throw ValueError; } /* R in Mpc, M in Msun */ double RtoM(double R){ // set M according to M<->R conversion defined by the filter type in ../Parameter_files/COSMOLOGY.H if (global_params.FILTER == 0) //top hat M = (4/3) PI <rho> R^3 return (4.0/3.0)*PI*pow(R,3)*(cosmo_params_ufunc->OMm*RHOcrit); else if (global_params.FILTER == 1) //gaussian: M = (2PI)^1.5 <rho> R^3 return pow(2*PI, 1.5) * cosmo_params_ufunc->OMm*RHOcrit * pow(R, 3); else // filter not defined LOG_ERROR("No such filter = %i. Results are bogus.", global_params.FILTER); Throw ValueError; } /* T in K, M in Msun, mu is mean molecular weight from Barkana & Loeb 2001 SUPRESS = 0 for no radiation field supression; SUPRESS = 1 for supression (step function at z=z_ss, at v=v_zz) */ double TtoM(double z, double T, double mu){ return 7030.97 / (cosmo_params_ufunc->hlittle) * sqrt( omega_mz(z) / (cosmo_params_ufunc->OMm*Deltac_nonlinear(z))) * pow( T/(mu * (1+z)), 1.5 ); /* if (!SUPRESS || (z >= z_re) ) // pre-reionization or don't worry about supression return 7030.97 / hlittle * sqrt( omega_mz(z) / (OMm*Deltac_nonlinear(z)) ) * pow( T/(mu * (1+z)), 1.5 ); if (z >= z_ss) // self-shielding dominates, use T = 1e4 K return 7030.97 / hlittle * sqrt( omega_mz(z) / (OMm*Deltac_nonlinear(z)) ) * pow( 1.0e4 /(mu * (1+z)), 1.5 ); // optically thin return 7030.97 / hlittle * sqrt( omega_mz(z) / (OMm*Deltac_nonlinear(z)) ) * pow( VcirtoT(v_ss, mu) /(mu * (1+z)), 1.5 ); */ } /* Physical (non-linear) overdensity at virialization (relative to critical density) i.e. 
 answer is rho / rho_crit
 In Einstein de sitter model = 178
 (fitting formula from Bryan & Norman 1998) */
double Deltac_nonlinear(float z){
    double d;
    d = omega_mz(z) - 1.0;
    return 18*PI*PI + 82*d - 39*d*d;
}

/* Omega matter at redshift z */
double omega_mz(float z){
    return cosmo_params_ufunc->OMm*pow(1+z,3) / (cosmo_params_ufunc->OMm*pow(1+z,3) + cosmo_params_ufunc->OMl + global_params.OMr*pow(1+z,4) + global_params.OMk*pow(1+z, 2));
}

/*
 FUNCTION dicke(z)
 Computes the dicke growth function at redshift z, i.e. the z dependance part of sigma

 References: Peebles, "Large-Scale...", pg.53 (eq. 11.16). Includes omega<=1
 Nonzero Lambda case from Liddle et al, astro-ph/9512102, eqs. 6-8.
 and quintessence case from Wang et al, astro-ph/9804015

 Normalized to dicke(z=0)=1
*/
double dicke(double z){
    double omegaM_z, dick_z, dick_0, x, x_0;
    double tiny = 1e-4;  /* tolerance for classifying the cosmology */

    if (fabs(cosmo_params_ufunc->OMm-1.0) < tiny){ //OMm = 1 (Einstein de-Sitter)
        return 1.0/(1.0+z);
    }
    else if ( (cosmo_params_ufunc->OMl > (-tiny)) && (fabs(cosmo_params_ufunc->OMl+cosmo_params_ufunc->OMm+global_params.OMr-1.0) < 0.01) && (fabs(global_params.wl+1.0) < tiny) ){
        //this is a flat, cosmological CONSTANT universe, with only lambda, matter and radiation
        //it is taken from liddle et al.
        omegaM_z = cosmo_params_ufunc->OMm*pow(1+z,3) / ( cosmo_params_ufunc->OMl + cosmo_params_ufunc->OMm*pow(1+z,3) + global_params.OMr*pow(1+z,4) );
        dick_z = 2.5*omegaM_z / ( 1.0/70.0 + omegaM_z*(209-omegaM_z)/140.0 + pow(omegaM_z, 4.0/7.0) );
        dick_0 = 2.5*cosmo_params_ufunc->OMm / ( 1.0/70.0 + cosmo_params_ufunc->OMm*(209-cosmo_params_ufunc->OMm)/140.0 + pow(cosmo_params_ufunc->OMm, 4.0/7.0) );
        return dick_z / (dick_0 * (1.0+z));
    }
    else if ( (global_params.OMtot < (1+tiny)) && (fabs(cosmo_params_ufunc->OMl) < tiny) ){
        //open, zero lambda case (peebles, pg. 53)
        x_0 = 1.0/(cosmo_params_ufunc->OMm+0.0) - 1.0;
        dick_0 = 1 + 3.0/x_0 + 3*log(sqrt(1+x_0)-sqrt(x_0))*sqrt(1+x_0)/pow(x_0,1.5);
        x = fabs(1.0/(cosmo_params_ufunc->OMm+0.0) - 1.0) / (1+z);
        dick_z = 1 + 3.0/x + 3*log(sqrt(1+x)-sqrt(x))*sqrt(1+x)/pow(x,1.5);
        return dick_z/dick_0;
    }
    else if ( (cosmo_params_ufunc->OMl > (-tiny)) && (fabs(global_params.OMtot-1.0) < tiny) && (fabs(global_params.wl+1) > tiny) ){
        // quintessence (Wang et al) case is not implemented
        LOG_WARNING("IN WANG.");
        Throw ValueError;
    }

    LOG_ERROR("No growth function!");
    Throw ValueError;
}

/* function DTDZ returns the value of dt/dz at the redshift parameter z. */
double dtdz(float z){
    double x, dxdz, const1, denom, numer;

    x = sqrt( cosmo_params_ufunc->OMl/cosmo_params_ufunc->OMm ) * pow(1+z, -3.0/2.0);
    dxdz = sqrt( cosmo_params_ufunc->OMl/cosmo_params_ufunc->OMm ) * pow(1+z, -5.0/2.0) * (-3.0/2.0);
    const1 = 2 * sqrt( 1 + cosmo_params_ufunc->OMm/cosmo_params_ufunc->OMl ) / (3.0 * Ho) ;

    numer = dxdz * (1 + x*pow( pow(x,2) + 1, -0.5));
    denom = x + sqrt(pow(x,2) + 1);
    return (const1 * numer / denom);
}

/* Time derivative of the growth function at z */
/*
  NOTE(review): this function returns the numerical finite-difference form
  on its first line; everything after that return is intentionally-kept but
  UNREACHABLE analytic code (see the "lazy" comment below).
*/
double ddickedt(double z){
    float dz = 1e-10;
    double omegaM_z, ddickdz, dick_0, x, x_0, domegaMdz;
    double tiny = 1e-4;

    return (dicke(z+dz)-dicke(z))/dz/dtdz(z); // lazy non-analytic form getting

    if (fabs(cosmo_params_ufunc->OMm-1.0) < tiny){ //OMm = 1 (Einstein de-Sitter)
        return -pow(1+z,-2)/dtdz(z);
    }
    else if ( (cosmo_params_ufunc->OMl > (-tiny)) && (fabs(cosmo_params_ufunc->OMl+cosmo_params_ufunc->OMm+global_params.OMr-1.0) < 0.01) && (fabs(global_params.wl+1.0) < tiny) ){
        //this is a flat, cosmological CONSTANT universe, with only lambda, matter and radiation
        //it is taken from liddle et al.
        omegaM_z = cosmo_params_ufunc->OMm*pow(1+z,3) / ( cosmo_params_ufunc->OMl + cosmo_params_ufunc->OMm*pow(1+z,3) + global_params.OMr*pow(1+z,4) );
        domegaMdz = omegaM_z*3/(1+z) - cosmo_params_ufunc->OMm*pow(1+z,3)*pow(cosmo_params_ufunc->OMl + cosmo_params_ufunc->OMm*pow(1+z,3) + global_params.OMr*pow(1+z,4), -2) * (3*cosmo_params_ufunc->OMm*(1+z)*(1+z) + 4*global_params.OMr*pow(1+z,3));
        dick_0 = cosmo_params_ufunc->OMm / ( 1.0/70.0 + cosmo_params_ufunc->OMm*(209-cosmo_params_ufunc->OMm)/140.0 + pow(cosmo_params_ufunc->OMm, 4.0/7.0) );

        ddickdz = (domegaMdz/(1+z)) * (1.0/70.0*pow(omegaM_z,-2) + 1.0/140.0 + 3.0/7.0*pow(omegaM_z, -10.0/3.0)) * pow(1.0/70.0/omegaM_z + (209.0-omegaM_z)/140.0 + pow(omegaM_z, -3.0/7.0) , -2);
        ddickdz -= pow(1+z,-2)/(1.0/70.0/omegaM_z + (209.0-omegaM_z)/140.0 + pow(omegaM_z, -3.0/7.0));

        return ddickdz / dick_0 / dtdz(z);
    }

    LOG_ERROR("No growth function!");
    Throw ValueError;
}

/* returns the hubble "constant" (in 1/sec) at z */
double hubble(float z){
    return Ho*sqrt(cosmo_params_ufunc->OMm*pow(1+z,3) + global_params.OMr*pow(1+z,4) + cosmo_params_ufunc->OMl);
}

/* returns hubble time (in sec), t_h = 1/H */
double t_hubble(float z){
    return 1.0/hubble(z);
}

/* comoving distance (in cm) per unit redshift */
double drdz(float z){
    return (1.0+z)*C*dtdz(z);
}

/* returns the case A hydrogen recombination coefficient (Abel et al.
1997) in cm^3 s^-1*/ double alpha_A(double T){ double logT, ans; logT = log(T/(double)1.1604505e4); ans = pow(E, -28.6130338 - 0.72411256*logT - 2.02604473e-2*pow(logT, 2) - 2.38086188e-3*pow(logT, 3) - 3.21260521e-4*pow(logT, 4) - 1.42150291e-5*pow(logT, 5) + 4.98910892e-6*pow(logT, 6) + 5.75561414e-7*pow(logT, 7) - 1.85676704e-8*pow(logT, 8) - 3.07113524e-9 * pow(logT, 9)); return ans; } /* returns the case B hydrogen recombination coefficient (Spitzer 1978) in cm^3 s^-1*/ double alpha_B(double T){ return alphaB_10k * pow (T/1.0e4, -0.75); } /* Function NEUTRAL_FRACTION returns the hydrogen neutral fraction, chi, given: hydrogen density (pcm^-3) gas temperature (10^4 K) ionization rate (1e-12 s^-1) */ double neutral_fraction(double density, double T4, double gamma, int usecaseB){ double chi, b, alpha, corr_He = 1.0/(4.0/global_params.Y_He - 3); if (usecaseB) alpha = alpha_B(T4*1e4); else alpha = alpha_A(T4*1e4); gamma *= 1e-12; // approximation chi << 1 chi = (1+corr_He)*density * alpha / gamma; if (chi < TINY){ return 0;} if (chi < 1e-5) return chi; // this code, while mathematically accurate, is numerically buggy for very small x_HI, so i will use valid approximation x_HI <<1 above when x_HI < 1e-5, and this otherwise... the two converge seemlessly //get solutions of quadratic of chi (neutral fraction) b = -2 - gamma / (density*(1+corr_He)*alpha); chi = ( -b - sqrt(b*b - 4) ) / 2.0; //correct root return chi; } /* function HeI_ion_crosssec returns the HI ionization cross section at parameter frequency (taken from Verner et al (1996) */ double HeI_ion_crosssec(double nu){ double x,y,Fy; if (nu < HeI_NUIONIZATION) return 0; x = nu/NU_over_EV/13.61 - 0.4434; y = sqrt(x*x + pow(2.136, 2)); return 9.492e-16*((x-1)*(x-1) + 2.039*2.039) * pow(y, (0.5 * 3.188 - 5.5)) * pow(1.0 + sqrt(y/1.469), -3.188); } /* function HeII_ion_crosssec returns the HeII ionization cross section at parameter frequency (taken from Osterbrock, pg. 
14) */ double HeII_ion_crosssec(double nu){ double epsilon, Z = 2; if (nu < HeII_NUIONIZATION) return 0; if (nu == HeII_NUIONIZATION) nu+=TINY; epsilon = sqrt( nu/HeII_NUIONIZATION - 1); return (6.3e-18)/Z/Z * pow(HeII_NUIONIZATION/nu, 4) * pow(E, 4-(4*atan(epsilon)/epsilon)) / (1-pow(E, -2*PI/epsilon)); } /* function HI_ion_crosssec returns the HI ionization cross section at parameter frequency (taken from Osterbrock, pg. 14) */ double HI_ion_crosssec(double nu){ double epsilon, Z = 1; if (nu < NUIONIZATION) return 0; if (nu == NUIONIZATION) nu+=TINY; epsilon = sqrt( nu/NUIONIZATION - 1); return (6.3e-18)/Z/Z * pow(NUIONIZATION/nu, 4) * pow(E, 4-(4*atan(epsilon)/epsilon)) / (1-pow(E, -2*PI/epsilon)); } /* Return the thomspon scattering optical depth from zstart to zend through fully ionized IGM. The hydrogen reionization history is given by the zarry and xHarry parameters, in increasing redshift order of length len.*/ typedef struct{ float *z, *xH; int len; } tau_e_params; double dtau_e_dz(double z, void *params){ float xH, xi; int i=1; tau_e_params p = *(tau_e_params *)params; if ((p.len == 0) || !(p.z)) { return (1+z)*(1+z)*drdz(z); } else{ // find where we are in the redshift array if (p.z[0]>z) // ionization fraction is 1 prior to start of array return (1+z)*(1+z)*drdz(z); while ( (i < p.len) && (p.z[i] < z) ) {i++;} if (i == p.len) return 0; // linearly interpolate in redshift xH = p.xH[i-1] + (p.xH[i] - p.xH[i-1])/(p.z[i] - p.z[i-1]) * (z - p.z[i-1]); xi = 1.0-xH; if (xi<0){ LOG_WARNING("in taue: funny business xi=%e, changing to 0.", xi); xi=0; } if (xi>1){ LOG_WARNING("in taue: funny business xi=%e, changing to 1", xi); xi=1; } return xi*(1+z)*(1+z)*drdz(z); } } double tau_e(float zstart, float zend, float *zarry, float *xHarry, int len){ double prehelium, posthelium, error; gsl_function F; double rel_tol = 1e-3; //<- relative tolerance gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000); tau_e_params p; if (zstart >= zend){ 
LOG_ERROR("in tau_e: First parameter must be smaller than the second.\n"); Throw ValueError; } F.function = &dtau_e_dz; p.z = zarry; p.xH = xHarry; p.len = len; F.params = &p; if ((len > 0) && zarry) zend = zarry[len-1] - FRACT_FLOAT_ERR; if (zend > global_params.Zreion_HeII){// && (zstart < Zreion_HeII)){ if (zstart < global_params.Zreion_HeII){ gsl_integration_qag (&F, global_params.Zreion_HeII, zstart, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &prehelium, &error); gsl_integration_qag (&F, zend, global_params.Zreion_HeII, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &posthelium, &error); } else{ prehelium = 0; gsl_integration_qag (&F, zend, zstart, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &posthelium, &error); } } else{ posthelium = 0; gsl_integration_qag (&F, zend, zstart, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &prehelium, &error); } gsl_integration_workspace_free (w); return SIGMAT * ( (N_b0+He_No)*prehelium + N_b0*posthelium ); } float ComputeTau(struct UserParams *user_params, struct CosmoParams *cosmo_params, int NPoints, float *redshifts, float *global_xHI) { int i; float tau; Broadcast_struct_global_UF(user_params,cosmo_params); tau = tau_e(0, redshifts[NPoints-1], redshifts, global_xHI, NPoints); return tau; } void writeUserParams(struct UserParams *p){ LOG_INFO("UserParams: [HII_DIM=%d, DIM=%d, BOX_LEN=%f, HMF=%d, POWER_SPECTRUM=%d, USE_RELATIVE_VELOCITIES=%d, N_THREADS=%d, PERTURB_ON_HIGH_RES=%d, NO_RNG=%d, USE_FFTW_WISDOM=%d, USE_INTERPOLATION_TABLES=%d, FAST_FCOLL_TABLES=%d]", p->HII_DIM, p->DIM, p->BOX_LEN, p->HMF, p->POWER_SPECTRUM, p->USE_RELATIVE_VELOCITIES, p->N_THREADS, p->PERTURB_ON_HIGH_RES, p->NO_RNG, p->USE_FFTW_WISDOM, p->USE_INTERPOLATION_TABLES, p->FAST_FCOLL_TABLES); } void writeCosmoParams(struct CosmoParams *p){ LOG_INFO("CosmoParams: [SIGMA_8=%f, hlittle=%f, OMm=%f, OMl=%f, OMb=%f, POWER_INDEX=%f]", p->SIGMA_8, p->hlittle, p->OMm, p->OMl, p->OMb, p->POWER_INDEX); } void writeAstroParams(struct FlagOptions *fo, struct AstroParams *p){ 
if(fo->USE_MASS_DEPENDENT_ZETA) { LOG_INFO("AstroParams: [HII_EFF_FACTOR=%f, ALPHA_STAR=%f, F_ESC10=%f (F_ESC7_MINI=%f), ALPHA_ESC=%f, M_TURN=%f, R_BUBBLE_MAX=%f, L_X=%e (L_X_MINI=%e), NU_X_THRESH=%f, X_RAY_SPEC_INDEX=%f, F_STAR10=%f (F_STAR7_MINI=%f), t_STAR=%f, N_RSD_STEPS=%f]", p->HII_EFF_FACTOR, p->ALPHA_STAR, p->F_ESC10,p->F_ESC7_MINI, p->ALPHA_ESC, p->M_TURN, p->R_BUBBLE_MAX, p->L_X, p->L_X_MINI, p->NU_X_THRESH, p->X_RAY_SPEC_INDEX, p->F_STAR10, p->F_STAR7_MINI, p->t_STAR, p->N_RSD_STEPS); } else { LOG_INFO("AstroParams: [HII_EFF_FACTOR=%f, ION_Tvir_MIN=%f, X_RAY_Tvir_MIN=%f, R_BUBBLE_MAX=%f, L_X=%e, NU_X_THRESH=%f, X_RAY_SPEC_INDEX=%f, F_STAR10=%f, t_STAR=%f, N_RSD_STEPS=%f]", p->HII_EFF_FACTOR, p->ION_Tvir_MIN, p->X_RAY_Tvir_MIN, p->R_BUBBLE_MAX, p->L_X, p->NU_X_THRESH, p->X_RAY_SPEC_INDEX, p->F_STAR10, p->t_STAR, p->N_RSD_STEPS); } } void writeFlagOptions(struct FlagOptions *p){ LOG_INFO("FlagOptions: [USE_HALO_FIELD=%d, USE_MINI_HALOS=%d, USE_MASS_DEPENDENT_ZETA=%d, SUBCELL_RSD=%d, INHOMO_RECO=%d, USE_TS_FLUCT=%d, M_MIN_in_Mass=%d, PHOTON_CONS=%d]", p->USE_HALO_FIELD, p->USE_MINI_HALOS, p->USE_MASS_DEPENDENT_ZETA, p->SUBCELL_RSD, p->INHOMO_RECO, p->USE_TS_FLUCT, p->M_MIN_in_Mass, p->PHOTON_CONS); } char *print_output_header(int print_pid, const char *name){ char * pid = malloc(12*sizeof(char)); if(print_pid){ sprintf(pid, "<%d>\t", getpid()); }else{ sprintf(pid, ""); } printf("%s%s:\n", pid, name); return (pid); } void print_corners_real(float *x, int size){ int s = size-1; int i,j,k; for(i=0;i<size;i=i+s){ for(j=0;j<size;j=j+s){ for(k=0;k<size;k=k+s){ printf("%f, ", x[k + size*(j + size*i)]); } } } printf("\n"); } void inspectInitialConditions(struct InitialConditions *x, int print_pid, int print_corners, int print_first, int HII_DIM){ int i; char *pid = print_output_header(print_pid, "InitialConditions"); if(print_first){ printf("%s\tFirstRow: ",pid); printf("%s\t\tlowres_density: "); for(i=0;i<10;i++){ printf("%f, ", x->lowres_density[i]); } 
printf("\n"); printf("%s\t\tlowres_vx : "); for(i=0;i<10;i++){ printf("%f, ", x->lowres_vx[i]); } printf("\n"); printf("%s\t\tlowres_vx_2LPT: "); for(i=0;i<10;i++){ printf("%f, ", x->lowres_vx_2LPT[i]); } printf("\n"); } if(print_corners){ printf("%s\tCorners: ",pid); printf("%s\t\tlowres_density: ",pid); print_corners_real(x->lowres_density, HII_DIM); printf("%s\t\tlowres_vx : ", pid); print_corners_real(x->lowres_vx, HII_DIM); printf("%s\t\tlowres_vx_2LPT: ", pid); print_corners_real(x->lowres_vx_2LPT, HII_DIM); } } void inspectPerturbedField(struct PerturbedField *x, int print_pid, int print_corners, int print_first, int HII_DIM){ int i; char *pid = print_output_header(print_pid, "PerturbedField"); if(print_first){ printf("%s\tFirstRow: \n",pid); printf("%s\t\tdensity: ", pid); for(i=0;i<10;i++){ printf("%f, ", x->density[i]); } printf("\n"); printf("%s\t\tvelocity: ", pid); for(i=0;i<10;i++){ printf("%f, ", x->velocity[i]); } printf("\n"); } if(print_corners){ printf("%s\tCorners: \n",pid); printf("%s\t\tdensity: ",pid); print_corners_real(x->density, HII_DIM); printf("%s\t\tvelocity: ", pid); print_corners_real(x->velocity, HII_DIM); } } void inspectTsBox(struct TsBox *x, int print_pid, int print_corners, int print_first, int HII_DIM){ int i; char *pid = print_output_header(print_pid, "TsBox"); if(print_first){ printf("%s\tFirstRow: ",pid); printf("%s\t\tTs_box : "); for(i=0;i<10;i++){ printf("%f, ", x->Ts_box[i]); } printf("\n"); printf("%s\t\tx_e_box: "); for(i=0;i<10;i++){ printf("%f, ", x->x_e_box[i]); } printf("\n"); printf("%s\t\tTk_box : "); for(i=0;i<10;i++){ printf("%f, ", x->Tk_box[i]); } printf("\n"); } if(print_corners){ printf("%s\tCorners: ",pid); printf("%s\t\tTs_box : ",pid); print_corners_real(x->Ts_box, HII_DIM); printf("%s\t\tx_e_box: ", pid); print_corners_real(x->x_e_box, HII_DIM); printf("%s\t\tTk_box : ", pid); print_corners_real(x->Tk_box, HII_DIM); } } void inspectIonizedBox(struct IonizedBox *x, int print_pid, int print_corners, int 
print_first, int HII_DIM){ int i; char *pid = print_output_header(print_pid, "IonizedBox"); if(print_first){ printf("%s\tFirstRow: ",pid); printf("%s\t\txH_box : "); for(i=0;i<10;i++){ printf("%f, ", x->xH_box[i]); } printf("\n"); printf("%s\t\tGamma12_box: "); for(i=0;i<10;i++){ printf("%f, ", x->Gamma12_box[i]); } printf("\n"); printf("%s\t\tz_re_box : "); for(i=0;i<10;i++){ printf("%f, ", x->z_re_box[i]); } printf("\n"); printf("%s\t\tdNrec_box : "); for(i=0;i<10;i++){ printf("%f, ", x->dNrec_box[i]); } printf("\n"); } if(print_corners){ printf("%s\tCorners: ",pid); printf("%s\t\txH_box : ",pid); print_corners_real(x->xH_box, HII_DIM); printf("%s\t\tGamma12_box: ", pid); print_corners_real(x->Gamma12_box, HII_DIM); printf("%s\t\tz_re_box : ", pid); print_corners_real(x->z_re_box, HII_DIM); printf("%s\t\tdNrec_box : ", pid); print_corners_real(x->dNrec_box, HII_DIM); } } void inspectBrightnessTemp(struct BrightnessTemp *x, int print_pid, int print_corners, int print_first, int HII_DIM){ int i; char *pid = print_output_header(print_pid, "BrightnessTemp"); if(print_first){ printf("%s\tFirstRow: ",pid); printf("%s\t\tbrightness_temp: "); for(i=0;i<10;i++){ printf("%f, ", x->brightness_temp[i]); } printf("\n"); } if(print_corners){ printf("%s\tCorners: ",pid); printf("%s\t\tbrightness_temp: ",pid); print_corners_real(x->brightness_temp, HII_DIM); } } double atomic_cooling_threshold(float z){ return TtoM(z, 1e4, 0.59); } double molecular_cooling_threshold(float z){ return TtoM(z, 600, 1.22); } double lyman_werner_threshold(float z, float J_21_LW){ // this follows Visbal+15, which is taken as the optimal fit from Fialkov+12 which // was calibrated with the simulations of Stacy+11 and Greif+11; double mcrit_noLW = 3.314e7 * pow( 1.+z, -1.5); return mcrit_noLW * (1. 
+ 22.8685 * pow(J_21_LW, 0.47)); } double reionization_feedback(float z, float Gamma_halo_HII, float z_IN){ if (z_IN<=1e-19) return 1e-40; return REION_SM13_M0 * pow(HALO_BIAS * Gamma_halo_HII, REION_SM13_A) * pow((1.+z)/10, REION_SM13_B) * pow(1 - pow((1.+z)/(1.+z_IN), REION_SM13_C), REION_SM13_D); } /* The following functions are simply for testing the exception framework */ void FunctionThatThrows(){ Throw(ParameterError); } int SomethingThatCatches(bool sub_func){ // A simple function that catches a thrown error. int status; Try{ if(sub_func) FunctionThatThrows(); else Throw(ParameterError); } Catch(status){ return status; } return 0; } int FunctionThatCatches(bool sub_func, bool pass, double *result){ int status; if(!pass){ Try{ if(sub_func) FunctionThatThrows(); else Throw(ParameterError); } Catch(status){ LOG_DEBUG("Caught the problem with status %d.", status); return status; } } *result = 5.0; return 0; }
4.c
#include <stdio.h>
#include <omp.h>

/* Spawn 16 threads that each increment a shared counter once, then print it.
 * Fix: the unsynchronized `x = x + 1` in the parallel region was a data race
 * (undefined behavior; the printed value was nondeterministic). The atomic
 * update makes each increment indivisible, so the program reliably prints 16. */
int main() {
    int x = 0, size = 16;
    omp_set_num_threads(size);
    #pragma omp parallel shared(x)
    {
        #pragma omp atomic
        x = x + 1;
    }
    printf("%d\n", x);
    return 0;
}
GB_binop__bset_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bset_int32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__bset_int32) // A.*B function (eWiseMult): GB (_AemultB_03__bset_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_int32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((node)) // C+=B function (dense accum): GB (_Cdense_accumB__bset_int32) // C+=b function (dense accum): GB (_Cdense_accumb__bset_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_int32) // C=scalar+B GB (_bind1st__bset_int32) // C=scalar+B' GB (_bind1st_tran__bset_int32) // C=A+scalar GB (_bind2nd__bset_int32) // C=A'+scalar GB (_bind2nd_tran__bset_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = GB_BITSET (aij, bij, int32_t, 32) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij 
= Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_BITSET (x, y, int32_t, 32) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSET || GxB_NO_INT32 || GxB_NO_BSET_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bset_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bset_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bset_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((node)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bset_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bset_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bset_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bset_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bset_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bset_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = GB_BITSET (x, bij, int32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bset_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = GB_BITSET (aij, y, int32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = GB_BITSET (x, aij, int32_t, 32) ; \ } GrB_Info GB (_bind1st_tran__bset_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = GB_BITSET (aij, y, int32_t, 32) ; \ } GrB_Info GB (_bind2nd_tran__bset_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
eval_cf.c
/*******************************************************************************
* 2pt_box/eval_cf.c: this file is part of the FCFC program.

* FCFC: Fast Correlation Function Calculator.

* Github repository: https://github.com/cheng-zhao/FCFC

* Copyright (c) 2020 -- 2021 Cheng Zhao <zhaocheng03@gmail.com>  [MIT license]

*******************************************************************************/

#include "eval_cf.h"
#include "count_func.h"
#include "read_file.h"
#include "read_res.h"
#include "save_res.h"
#include "legpoly.h"
#include "build_tree.h"
#include <stdlib.h>
#ifdef OMP
#include <omp.h>
#endif

/*============================================================================*\
                         Macros for error handling
\*============================================================================*/
/* Destroy every tree and release the tree array itself.
 * Wrapped in do { ... } while (0) so the macro expands to a single statement:
 * the original form (`for ...; free(tree);`) would silently detach
 * `free(tree)` from the condition if used after an unbraced `if`. */
#define CLEAN_TREE do {                                         \
  for (int ii = 0; ii < cf->ncat; ii++)                         \
    tree_destroy(tree[ii], FCFC_TREE_TYPE_KDTREE);              \
  free(tree);                                                   \
} while (0)

/*============================================================================*\
             Functions for steps of correlation function evaluations
\*============================================================================*/

/******************************************************************************
Function `eval_pairs`:
  Evaluate pair counts for the input catalogs.
Arguments:
  * `conf`:     structure for storing configurations;
  * `cf`:       structure for correlation function evaluations.
Return:
  Zero on success; non-zero on error.
******************************************************************************/
static int eval_pairs(const CONF *conf, CF *cf) {
  /* Allocate memory for trees, one slot per input catalogue. */
  void **tree;
  if (!(tree = malloc(sizeof(void *) * cf->ncat))) {
    P_ERR("failed to allocate memory for trees\n");
    return FCFC_ERR_MEMORY;
  }
  for (int i = 0; i < cf->ncat; i++) tree[i] = NULL;

  /* Number of bins in the second dimension of the pair-count table. */
  int ny;
  if (cf->bintype == FCFC_BIN_SPI) ny = cf->np;
  else ny = cf->nmu;          /* cf->bintype == FCFC_BIN_ISO or FCFC_BIN_SMU */

  for (int i = 0; i < cf->npc; i++) {
    /* Read pair counts from file if applicable (counting not requested). */
    if (!cf->comp_pc[i]) {
      printf("Reading %s pairs ...", conf->pc[i]);
      if (conf->verbose) printf("\n Filename: %s\n", conf->pcout[i]);
      fflush(stdout);
      int e = read_pair_count(conf->pcout[i], cf->ns, ny, cf->ncnt[i]);
      if (e) {
        CLEAN_TREE;
        return e;
      }
      printf(FMT_DONE);
      continue;
    }

    /* Indices of the two catalogues contributing to this pair count. */
    int cat[2];
    cat[0] = cf->pc_idx[0][i];
    cat[1] = cf->pc_idx[1][i];

    /* Read catalogue and construct the tree if necessary. */
    for (int j = 0; j < 2; j++) {
      int idx = cat[j];
      if (!tree[idx]) {
        tree[idx] = tree_create(conf, cf, idx, FCFC_TREE_TYPE_KDTREE);
        if (!tree[idx]) {
          CLEAN_TREE;
          return FCFC_ERR_TREE;
        }
      }
    }

    /* Count pairs. */
    printf("Counting %c%c pairs ...", cf->label[cat[0]], cf->label[cat[1]]);
    if (conf->verbose) printf("\n");
    fflush(stdout);

    if (cat[0] == cat[1]) {     /* auto pairs */
      count_pairs(tree[cat[0]], tree[cat[0]], cf, cf->cnt[i], true);
      /* Double auto pairs: only unique (unordered) pairs were counted. */
      for (size_t k = 0; k < cf->ntot; k++) cf->cnt[i][k] *= 2;
      cf->norm[i] = (double) cf->ndata[cat[0]] * (cf->ndata[cat[0]] - 1);
    }
    else {                      /* cross counts */
      count_pairs(tree[cat[0]], tree[cat[1]], cf, cf->cnt[i], false);
      cf->norm[i] = (double) cf->ndata[cat[0]] * cf->ndata[cat[1]];
    }

    /* Normalise pair counts. */
    for (size_t k = 0; k < cf->ntot; k++)
      cf->ncnt[i][k] = cf->cnt[i][k] / cf->norm[i];

    /* Save pair counts. */
    int e = save_res(conf, cf, i, FCFC_OUTPUT_PAIR_COUNT);
    if (e) {
      CLEAN_TREE;
      return e;
    }

    /* Release catalogue data and trees no later pair count still needs. */
    for (int j = 0; j < 2; j++) {
      bool free_data = true;
      for (int k = i + 1; k < cf->npc; k++) {
        if (cat[j] == cf->pc_idx[0][k] || cat[j] == cf->pc_idx[1][k]) {
          free_data = false;
          break;
        }
      }
      if (free_data) {
        free(cf->data[cat[j]]);
        cf->data[cat[j]] = NULL;
        tree_destroy(tree[cat[j]], FCFC_TREE_TYPE_KDTREE);
        tree[cat[j]] = NULL;
      }
    }
    printf(FMT_DONE);
  }
  CLEAN_TREE;

  /* Compute analytical RR if necessary (periodic-box geometry). */
  if (cf->rr) {
    if (cf->bintype == FCFC_BIN_SPI) {
      double fac = 2 * M_PI / ((double) cf->bsize * cf->bsize * cf->bsize);
      for (int i = 0; i < cf->np; i++) {
        double dpi = cf->pbin[i + 1] - cf->pbin[i];
        for (int j = 0; j < cf->ns; j++) {
          cf->rr[j + i * cf->ns] = fac * dpi *
              (cf->s2bin[j + 1] - cf->s2bin[j]);
        }
      }
    }
    else {      /* cf->bintype == FCFC_BIN_ISO or FCFC_BIN_SMU */
      double fac = 4 * M_PI /
          (3 * cf->nmu * (double) cf->bsize * cf->bsize * cf->bsize);
      for (int i = 0; i < cf->ns; i++) {
        /* Shell volume between radii sbin[i] and sbin[i+1]. */
        double rr = fac * (cf->s2bin[i + 1] * cf->sbin[i + 1] -
            cf->s2bin[i] * cf->sbin[i]);
        for (int j = 0; j < cf->nmu; j++) cf->rr[i + j * cf->ns] = rr;
      }
    }
  }

  return 0;
}

/******************************************************************************
Function `eval_cf_exp`:
  Evaluate correlation functions given the expressions for estimators.
Arguments:
  * `conf`:     structure for storing configurations;
  * `cf`:       structure for correlation function evaluations.
Return:
  Zero on success; non-zero on error.
******************************************************************************/
static int eval_cf_exp(const CONF *conf, CF *cf) {
  printf("Evaluate correlation function estimators ...");
  if (conf->verbose) printf("\n");
  fflush(stdout);

  /* Prepare the array of normalised pair counts, for libast evaluations.
   * With OpenMP each thread gets a private (npc + 1)-wide slice of `pc`. */
#ifdef OMP
  double *pc = calloc((size_t) cf->nthread * (cf->npc + 1), sizeof(double));
#else
  double *pc = calloc(cf->npc + 1, sizeof(double));
#endif
  if (!pc) {
    P_ERR("failed to allocate memory for 2PCF evaluation\n");
    return FCFC_ERR_MEMORY;
  }

  for (int i = 0; i < cf->ncf; i++) {
#ifdef OMP
#pragma omp parallel num_threads(cf->nthread) firstprivate(pc)
    {
      const int tid = omp_get_thread_num();
      /* Advance the private copy of `pc` to this thread's slice. */
      pc += (size_t) tid * (cf->npc + 1);
#pragma omp for
#endif
    for (size_t j = 0; j < cf->ntot; j++) {
      /* Set pair counts to be passed to libast. */
      for (int k = 0; k < cf->npc; k++) pc[k] = cf->ncnt[k][j];
      if (cf->rr) pc[cf->npc] = cf->rr[j];
      /* Evaluate the 2PCF. */
      if (ast_eval_num(cf->ast_cf[i], cf->cf[i] + j, pc, cf->npc + 1)) {
        ast_perror(cf->ast_cf[i], stderr, FMT_ERR
            " failed to evaluate 2PCF:");
        /* No clean way to abort from inside a parallel region: exit. */
#ifdef OMP
        exit(FCFC_ERR_AST);
#else
        free(pc);
        return FCFC_ERR_AST;
#endif
      }
    }
#ifdef OMP
    }
#endif

    /* Save the correlation function. */
    int e = save_res(conf, cf, i, FCFC_OUTPUT_2PCF_RAW);
    if (e) {
      free(pc);
      return e;
    }
  }

  free(pc);
  printf(FMT_DONE);
  return 0;
}

/******************************************************************************
Function `eval_cf_mp`:
  Evaluate correlation function multipoles given xi(s,mu).
Arguments:
  * `conf`:     structure for storing configurations;
  * `cf`:       structure for correlation function evaluations.
Return:
  Zero on success; non-zero on error.
******************************************************************************/
static int eval_cf_mp(const CONF *conf, CF *cf) {
  printf("Compute correlation function multipoles ...");
  if (conf->verbose) printf("\n");
  fflush(stdout);

  for (int icf = 0; icf < cf->ncf; icf++) {
    /* Integrate xi(s,mu) against each requested Legendre polynomial. */
    for (int il = 0; il < cf->nl; il++) {
      const int ell = cf->poles[il];
      const double coef = (2 * ell + 1) / (double) cf->nmu;
      for (int imu = 0; imu < cf->nmu; imu++) {
        /* Mid-point of the mu bin. */
        const double mu = (imu + 0.5) / (double) cf->nmu;
        for (int is = 0; is < cf->ns; is++) {
          cf->mp[icf][is + il * cf->ns] +=
              cf->cf[icf][is + imu * cf->ns] * coef * legpoly(ell, mu);
        }
      }
    }
    /* Save the correlation function multipoles. */
    int err = save_res(conf, cf, icf, FCFC_OUTPUT_2PCF_INTEG);
    if (err) return err;
  }

  printf(FMT_DONE);
  return 0;
}

/******************************************************************************
Function `eval_cf_wp`:
  Evaluate projected correlation functions given xi(s_perp,pi).
Arguments:
  * `conf`:     structure for storing configurations;
  * `cf`:       structure for correlation function evaluations.
Return:
  Zero on success; non-zero on error.
******************************************************************************/
static int eval_cf_wp(const CONF *conf, CF *cf) {
  printf("Compute projected correlation functions ...");
  if (conf->verbose) printf("\n");
  fflush(stdout);

  for (int icf = 0; icf < cf->ncf; icf++) {
    /* Integrate 2 * xi(s_perp,pi) over the pi bins. */
    for (int ip = 0; ip < cf->np; ip++) {
      const double dpi = cf->pbin[ip + 1] - cf->pbin[ip];
      for (int is = 0; is < cf->ns; is++)
        cf->wp[icf][is] += 2 * cf->cf[icf][is + ip * cf->ns] * dpi;
    }
    /* Save the projected correlation functions. */
    int err = save_res(conf, cf, icf, FCFC_OUTPUT_2PCF_INTEG);
    if (err) return err;
  }

  printf(FMT_DONE);
  return 0;
}

/*============================================================================*\
                Interface for correlation function evaluations
\*============================================================================*/

/******************************************************************************
Function `eval_cf`:
  Evaluate correlation functions.
Arguments:
  * `conf`:     structure for storing configurations;
  * `cf`:       structure for correlation function evaluations.
Return:
  Zero on success; non-zero on error.
******************************************************************************/
int eval_cf(const CONF *conf, CF *cf) {
  /* Pair counting always runs first; the remaining steps are optional. */
  int status = eval_pairs(conf, cf);
  if (status) return status;
  if (cf->ncf) {
    status = eval_cf_exp(conf, cf);
    if (status) return status;
  }
  if (cf->nl) {
    status = eval_cf_mp(conf, cf);
    if (status) return status;
  }
  if (cf->comp_wp) {
    status = eval_cf_wp(conf, cf);
    if (status) return status;
  }
  return 0;
}
psd.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml % */ /* Include declarations. 
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "coders/coders-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"

/*
  Define declarations.
*/
/* Maximum number of channels a PSD layer record can describe. */
#define MaxPSDChannels  56
/* Round x up to an even number of bytes (PSD sections are 2-byte padded). */
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)

/*
  Enumerated declarations.
*/
/* Per-channel compression schemes defined by the PSD file format. */
typedef enum
{
  Raw = 0,
  RLE = 1,
  ZipWithoutPrediction = 2,
  ZipWithPrediction = 3
} PSDCompressionType;

/* Color-mode values as stored in the PSD file header. */
typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;

/*
  Typedef declarations.
*/
/* One channel record of a layer: channel type id and its byte size. */
typedef struct _ChannelInfo
{
  short
    type;

  size_t
    size;
} ChannelInfo;

/* A layer's opacity mask: the mask image, its placement, and flags. */
typedef struct _MaskInfo
{
  Image
    *image;

  RectangleInfo
    page;

  unsigned char
    background,
    flags;
} MaskInfo;

/* Everything read from a PSD layer record. */
typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];

  char
    blendkey[4];          /* 4-character blend-mode key, e.g. "norm" */

  Image
    *image;

  MaskInfo
    mask;

  Quantum
    opacity;

  RectangleInfo
    page;

  size_t
    offset_x,
    offset_y;

  unsigned char
    clipping,
    flags,
    name[257],            /* Pascal string: length byte + up to 255 chars */
    visible;

  unsigned short
    channels;

  StringInfo
    *info;
} LayerInfo;

/*
  Forward declarations.
*/
static MagickBooleanType
  WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s P S D                                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsPSD() returns MagickTrue if the image format type, identified by the
%  magick string, is PSD (the file starts with the "8BPS" signature).
%
%  The format of the IsPSD method is:
%
%      MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
%  A description of each parameter follows:
%
%    o magick: compare image format pattern against these bytes.
%
%    o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  if (length < 4)
    return(MagickFalse);
  if (LocaleNCompare((const char *) magick,"8BPS",4) == 0)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d P S D I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPSDImage() reads an Adobe Photoshop image file and returns it.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ReadPSDImage method is:
%
%      Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
% % o exception: return any errors or warnings in this structure. % */ static const char *CompositeOperatorToPSDBlendMode(Image *image) { switch (image->compose) { case ColorBurnCompositeOp: return(image->endian == LSBEndian ? "vidi" : "idiv"); case ColorDodgeCompositeOp: return(image->endian == LSBEndian ? " vid" : "div "); case ColorizeCompositeOp: return(image->endian == LSBEndian ? "rloc" : "colr"); case DarkenCompositeOp: return(image->endian == LSBEndian ? "krad" : "dark"); case DifferenceCompositeOp: return(image->endian == LSBEndian ? "ffid" : "diff"); case DissolveCompositeOp: return(image->endian == LSBEndian ? "ssid" : "diss"); case ExclusionCompositeOp: return(image->endian == LSBEndian ? "dums" : "smud"); case HardLightCompositeOp: return(image->endian == LSBEndian ? "tiLh" : "hLit"); case HardMixCompositeOp: return(image->endian == LSBEndian ? "xiMh" : "hMix"); case HueCompositeOp: return(image->endian == LSBEndian ? " euh" : "hue "); case LightenCompositeOp: return(image->endian == LSBEndian ? "etil" : "lite"); case LinearBurnCompositeOp: return(image->endian == LSBEndian ? "nrbl" : "lbrn"); case LinearDodgeCompositeOp: return(image->endian == LSBEndian ? "gddl" : "lddg"); case LinearLightCompositeOp: return(image->endian == LSBEndian ? "tiLl" : "lLit"); case LuminizeCompositeOp: return(image->endian == LSBEndian ? " mul" : "lum "); case MultiplyCompositeOp: return(image->endian == LSBEndian ? " lum" : "mul "); case OverlayCompositeOp: return(image->endian == LSBEndian ? "revo" : "over"); case PinLightCompositeOp: return(image->endian == LSBEndian ? "tiLp" : "pLit"); case SaturateCompositeOp: return(image->endian == LSBEndian ? " tas" : "sat "); case ScreenCompositeOp: return(image->endian == LSBEndian ? "nrcs" : "scrn"); case SoftLightCompositeOp: return(image->endian == LSBEndian ? "tiLs" : "sLit"); case VividLightCompositeOp: return(image->endian == LSBEndian ? "tiLv" : "vLit"); case OverCompositeOp: default: return(image->endian == LSBEndian ? 
"mron" : "norm"); } } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. */ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image,ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if ((image->alpha_trait != BlendPixelTrait) || (image->colorspace != sRGBColorspace)) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringFalse(option) != MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; ssize_t i; gamma=QuantumScale*GetPixelAlpha(image, q); if (gamma != 0.0 && gamma != 1.0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); if (channel != AlphaPixelChannel) q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma); } } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) 
LogMagickEvent(CoderEvent,GetMagickModule(), " applying layer opacity %.20g", (double) opacity); if (opacity == OpaqueAlpha) return(MagickTrue); if (image->alpha_trait != BlendPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))* opacity),q); else if (opacity > 0) SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/ (MagickRealType) opacity)),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; PixelInfo color; ssize_t y; if (image->alpha_trait == UndefinedPixelTrait) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,0,0,MagickTrue,exception); if (complete_mask == (Image *) NULL) return(MagickFalse); complete_mask->alpha_trait=BlendPixelTrait; GetPixelInfo(complete_mask,&color); color.red=(MagickRealType) background; (void) SetImageColor(complete_mask,&color,exception); status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue, mask->page.x-image->page.x,mask->page.y-image->page.y,exception); if (status == MagickFalse) { complete_mask=DestroyImage(complete_mask); return(status); } #if 
defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; Quantum *p; ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=(MagickRealType) GetPixelAlpha(image,q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q); else if (intensity > 0) SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q); q+=GetPixelChannels(image); p+=GetPixelChannels(complete_mask); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=(char) layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const 
ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 
0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); } static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(const Image *image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return(2); } if (image->depth > 16) return(4); if (image->depth > 8) return(2); return(1); } static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image) { if (psd_info->version == 1) return((MagickSizeType) ReadBlobLong(image)); return((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image *image) { if (image->depth == 1) return(((image->columns+7)/8)*GetPSDPacketSize(image)); else return(image->columns*GetPSDPacketSize(image)); } static const char *ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; 
default: return "unknown"; } } static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception) { ChannelType channel_mask; MagickBooleanType status; channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~ AlphaChannel)); status=NegateImage(image,MagickFalse,exception); (void) SetImageChannelMask(image,channel_mask); return(status); } static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image, const unsigned char *blocks,size_t length) { const unsigned char *p; ssize_t offset; StringInfo *profile; unsigned char name_length; unsigned int count; unsigned short id, short_sans; if (length < 16) return((StringInfo *) NULL); profile=BlobToStringInfo((const unsigned char *) NULL,length); SetStringInfoDatum(profile,blocks); SetStringInfoName(profile,"8bim"); for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p+=4; p=PushShortPixel(MSBEndian,p,&id); p=PushCharPixel(p,&name_length); if ((name_length % 2) == 0) name_length++; p+=name_length; if (p > (blocks+length-4)) break; p=PushLongPixel(MSBEndian,p,&count); offset=(ssize_t) count; if (((p+offset) < blocks) || ((p+offset) > (blocks+length))) break; switch (id) { case 0x03ed: { unsigned short resolution; /* Resolution info. 
*/ if (offset < 16) break; p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.x=(double) resolution; (void) FormatImageProperty(image,"tiff:XResolution","%*g", GetMagickPrecision(),image->resolution.x); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.y=(double) resolution; (void) FormatImageProperty(image,"tiff:YResolution","%*g", GetMagickPrecision(),image->resolution.y); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if ((offset > 4) && (*(p+4) == 0)) psd_info->has_merged_image=MagickFalse; p+=offset; break; } default: { p+=offset; break; } } if ((offset & 0x01) != 0) p++; } return(profile); } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *) NULL) return(OverCompositeOp); if (LocaleNCompare(mode,"norm",4) == 0) return(OverCompositeOp); if (LocaleNCompare(mode,"mul ",4) == 0) return(MultiplyCompositeOp); if (LocaleNCompare(mode,"diss",4) == 0) return(DissolveCompositeOp); if (LocaleNCompare(mode,"diff",4) == 0) return(DifferenceCompositeOp); if (LocaleNCompare(mode,"dark",4) == 0) return(DarkenCompositeOp); if (LocaleNCompare(mode,"lite",4) == 0) return(LightenCompositeOp); if (LocaleNCompare(mode,"hue ",4) == 0) return(HueCompositeOp); if (LocaleNCompare(mode,"sat ",4) == 0) return(SaturateCompositeOp); if (LocaleNCompare(mode,"colr",4) == 0) return(ColorizeCompositeOp); if (LocaleNCompare(mode,"lum ",4) == 0) return(LuminizeCompositeOp); if (LocaleNCompare(mode,"scrn",4) == 0) return(ScreenCompositeOp); if (LocaleNCompare(mode,"over",4) == 0) return(OverlayCompositeOp); if (LocaleNCompare(mode,"hLit",4) == 0) return(HardLightCompositeOp); if (LocaleNCompare(mode,"sLit",4) == 0) return(SoftLightCompositeOp); if 
(LocaleNCompare(mode,"smud",4) == 0) return(ExclusionCompositeOp); if (LocaleNCompare(mode,"div ",4) == 0) return(ColorDodgeCompositeOp); if (LocaleNCompare(mode,"idiv",4) == 0) return(ColorBurnCompositeOp); if (LocaleNCompare(mode,"lbrn",4) == 0) return(LinearBurnCompositeOp); if (LocaleNCompare(mode,"lddg",4) == 0) return(LinearDodgeCompositeOp); if (LocaleNCompare(mode,"lLit",4) == 0) return(LinearLightCompositeOp); if (LocaleNCompare(mode,"vLit",4) == 0) return(VividLightCompositeOp); if (LocaleNCompare(mode,"pLit",4) == 0) return(PinLightCompositeOp); if (LocaleNCompare(mode,"hMix",4) == 0) return(HardMixCompositeOp); return(OverCompositeOp); } static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length) { ssize_t count; count=ReadBlob(image,length,(unsigned char *) p); if ((count == (ssize_t) length) && (image->endian != MSBEndian)) { char *q; q=p+length; for(--q; p < q; ++p, --q) { *p = *p ^ *q, *q = *p ^ *q, *p = *p ^ *q; } } return(count); } static inline void SetPSDPixel(Image *image,const size_t channels, const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q, ExceptionInfo *exception) { if (image->storage_class == PseudoClass) { PixelInfo *color; Quantum index; index=pixel; if (packet_size == 1) index=(Quantum) ScaleQuantumToChar(index); index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index, exception); if (type == 0) SetPixelIndex(image,index,q); if ((type == 0) && (channels > 1)) return; color=image->colormap+(ssize_t) GetPixelIndex(image,q); if (type != 0) color->alpha=(MagickRealType) pixel; SetPixelViaPixelInfo(image,color,q); return; } switch (type) { case -1: { SetPixelAlpha(image,pixel,q); break; } case -2: case 0: { SetPixelRed(image,pixel,q); break; } case -3: case 1: { SetPixelGreen(image,pixel,q); break; } case -4: case 2: { SetPixelBlue(image,pixel,q); break; } case 3: { if (image->colorspace == CMYKColorspace) SetPixelBlack(image,pixel,q); else if (image->alpha_trait != UndefinedPixelTrait) 
SetPixelAlpha(image,pixel,q); break; } case 4: { if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) && (channels > 3)) break; if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } } }
/*
  Decode one scanline of channel samples from `pixels` into row `row` of the
  image.  The packet size (1, 2, or 4 bytes per sample) selects char/short/
  float decoding.  For 1-bit images each source byte expands to up to eight
  pixels, MSB first, with a set bit mapped to 0 (black) and a clear bit to
  QuantumRange (white).
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image, const size_t channels,const ssize_t row,const ssize_t type, const unsigned char *pixels,ExceptionInfo *exception) { Quantum pixel; const unsigned char *p; Quantum *q; ssize_t x; size_t packet_size; p=pixels; q=GetAuthenticPixels(image,0,row,image->columns,1,exception); if (q == (Quantum *) NULL) return MagickFalse; packet_size=GetPSDPacketSize(image); for (x=0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel=ScaleCharToQuantum(*p++); else if (packet_size == 2) { unsigned short nibble; p=PushShortPixel(MSBEndian,p,&nibble); pixel=ScaleShortToQuantum(nibble); } else { MagickFloatType nibble; p=PushFloatPixel(MSBEndian,p,&nibble); pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble)); } if (image->depth > 1) { SetPSDPixel(image,channels,type,packet_size,pixel,q,exception); q+=GetPixelChannels(image); } else { ssize_t bit, number_bits;
/* 1-bit data: expand up to 8 pixels from this byte, clamped at the row end. */
number_bits=(ssize_t) image->columns-x; if (number_bits > 8) number_bits=8; for (bit = 0; bit < (ssize_t) number_bits; bit++) { SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel) & (0x01 << (7-bit))) != 0 ?
0 : QuantumRange,q,exception); q+=GetPixelChannels(image); x++; } if (x != (ssize_t) image->columns) x--; continue; } } return(SyncAuthenticPixels(image,exception)); }
/*
  Read an uncompressed (RAW) channel: one blob read per scanline, decoded
  row by row via ReadPSDChannelPixels.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels, const ssize_t type,ExceptionInfo *exception) { MagickBooleanType status; size_t row_size; ssize_t count, y; unsigned char *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RAW"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(pixels,0,row_size*sizeof(*pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,row_size,pixels); if (count != (ssize_t) row_size) break; status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); }
/*
  Read the per-scanline byte counts that precede PSD RLE channel data:
  16-bit values in version-1 (PSD) files, 32-bit in version-2 (PSB) files.
  Returns NULL on allocation failure.
*/
static inline MagickOffsetType *ReadPSDRLESizes(Image *image, const PSDInfo *psd_info,const size_t size) { MagickOffsetType *sizes; ssize_t y; sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes)); if(sizes != (MagickOffsetType *) NULL) { for (y=0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y]=(MagickOffsetType) ReadBlobShort(image); else sizes[y]=(MagickOffsetType) ReadBlobLong(image); } } return sizes; }
/*
  Read a PackBits (RLE) compressed channel.  `sizes` holds the compressed
  byte count of each scanline; each row is read, decoded with
  DecodePSDPixels, and stored via ReadPSDChannelPixels.
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info, const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RLE compressed"); row_size=GetPSDRowSize(image); pixels=(unsigned char *)
AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); length=0;
/* The compressed buffer is sized to the largest scanline; reject absurd
   lengths (row_size+2048 is an arbitrary sanity bound). */
for (y=0; y < (ssize_t) image->rows; y++) if ((MagickOffsetType) length < sizes[y]) length=(size_t) sizes[y]; if (length > (row_size+2048)) /* arbitrary number */ { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename); } compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels)); if (compact_pixels == (unsigned char *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(compact_pixels,0,length*sizeof(*compact_pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,(size_t) sizes[y],compact_pixels); if (count != (ssize_t) sizes[y]) break;
/* Depth value 123456 is a sentinel understood by DecodePSDPixels to request
   1-bit row expansion -- NOTE(review): confirm against DecodePSDPixels. */
count=DecodePSDPixels((size_t) sizes[y],compact_pixels, (ssize_t) (image->depth == 1 ?
123456 : image->depth),row_size,pixels); if (count != (ssize_t) row_size) break; status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels, exception); if (status == MagickFalse) break; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  Undo the horizontal delta ("prediction") filter applied before ZIP
  compression of 8-bit data: each byte is stored as the difference from its
  left neighbour, so a running prefix sum restores the samples row by row.
*/
static void Unpredict8Bit(const Image *image,unsigned char *pixels, const size_t count,const size_t row_size) { unsigned char *p; size_t length, remaining; p=pixels; remaining=count; while (remaining > 0) { length=image->columns; while (--length) { *(p+1)+=*p; p++; } p++; remaining-=row_size; } }
/*
  Undo the delta filter for 16-bit big-endian samples: the high byte
  accumulates the carry out of the low-byte addition.
*/
static void Unpredict16Bit(const Image *image,unsigned char *pixels, const size_t count,const size_t row_size) { unsigned char *p; size_t length, remaining; p=pixels; remaining=count; while (remaining > 0) { length=image->columns; while (--length) { p[2]+=p[0]+((p[1]+p[3]) >> 8); p[3]+=p[1]; p+=2; } p+=2; remaining-=row_size; } }
/*
  Undo the delta filter for 32-bit data and re-interleave: each row stores
  the four bytes of every sample as four planes (all byte 0s, then all
  byte 1s, ...), so after the prefix sum the planes are gathered back into
  contiguous 4-byte samples in output_pixels.
*/
static void Unpredict32Bit(const Image *image,unsigned char *pixels, unsigned char *output_pixels,const size_t row_size) { unsigned char *p, *q; ssize_t y; size_t offset1, offset2, offset3, remaining; unsigned char *start; offset1=image->columns; offset2=2*offset1; offset3=3*offset1; p=pixels; q=output_pixels; for (y=0; y < (ssize_t) image->rows; y++) { start=p; remaining=row_size; while (--remaining) { *(p+1)+=*p; p++; } p=start; remaining=image->columns; while (remaining--) { *(q++)=*p; *(q++)=*(p+offset1); *(q++)=*(p+offset2); *(q++)=*(p+offset3); p++; } p=start+row_size; } }
/*
  Read a ZIP-compressed channel (with or without delta prediction),
  inflate it with zlib, undo the prediction filter if present, and store
  the decoded rows.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels, const ssize_t type,const PSDCompressionType compression, const size_t compact_size,ExceptionInfo *exception) { MagickBooleanType status; unsigned char *p; size_t count, packet_size, row_size; ssize_t y; unsigned char *compact_pixels, *pixels; z_stream stream; if (image->debug != MagickFalse) (void)
LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is ZIP compressed"); if ((MagickSizeType) compact_size > GetBlobSize(image)) ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size, sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); packet_size=GetPSDPacketSize(image); row_size=image->columns*packet_size; count=image->rows*row_size; pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; stream.next_in=(Bytef *)compact_pixels; stream.avail_in=(uInt) compact_size; stream.next_out=(Bytef *)pixels; stream.avail_out=(uInt) count; if (inflateInit(&stream) == Z_OK) { int ret; while (stream.avail_out > 0) { ret=inflate(&stream,Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { (void) inflateEnd(&stream); compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(MagickFalse); } if (ret == Z_STREAM_END) break; } (void) inflateEnd(&stream); } if (compression == ZipWithPrediction) { if (packet_size == 1) Unpredict8Bit(image,pixels,count,row_size); else if (packet_size == 2) Unpredict16Bit(image,pixels,count,row_size); else if (packet_size == 4) { unsigned char *output_pixels; output_pixels=(unsigned char *) 
AcquireQuantumMemory(count, sizeof(*output_pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } Unpredict32Bit(image,pixels,output_pixels,row_size); pixels=(unsigned char *) RelinquishMagickMemory(pixels); pixels=output_pixels; } } status=MagickTrue; p=pixels; for (y=0; y < (ssize_t) image->rows; y++) { status=ReadPSDChannelPixels(image,channels,y,type,p,exception); if (status == MagickFalse) break; p+=row_size; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #endif static MagickBooleanType ReadPSDChannel(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info, const size_t channel,const PSDCompressionType compression, ExceptionInfo *exception) { Image *channel_image, *mask; MagickOffsetType offset; MagickBooleanType status; channel_image=image; mask=(Image *) NULL; if ((layer_info->channel_info[channel].type < -1) && (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0)) { const char *option; /* Ignore mask that is not a user supplied layer mask, if the mask is disabled or if the flags have unsupported values. 
*/ option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) {
/* Skip the mask channel's data (its 2-byte compression tag was already
   consumed by the caller). */
(void) SeekBlob(image,(MagickOffsetType) layer_info->channel_info[channel].size-2,SEEK_CUR); return(MagickTrue); } mask=CloneImage(image,layer_info->mask.page.width, layer_info->mask.page.height,MagickFalse,exception); if (mask != (Image *) NULL) { (void) ResetImagePixels(mask,exception); (void) SetImageType(mask,GrayscaleType,exception); channel_image=mask; } } offset=TellBlob(image); status=MagickFalse; switch(compression) { case Raw: status=ReadPSDChannelRaw(channel_image,psd_info->channels, (ssize_t) layer_info->channel_info[channel].type,exception); break; case RLE: { MagickOffsetType *sizes; sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ReadPSDChannelRLE(channel_image,psd_info, (ssize_t) layer_info->channel_info[channel].type,sizes,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status=ReadPSDChannelZip(channel_image,layer_info->channels, (ssize_t) layer_info->channel_info[channel].type,compression, layer_info->channel_info[channel].size-2,exception); #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)",image->filename); #endif break; default: (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning, "CompressionNotSupported","'%.20g'",(double) compression); break; }
/* Always reposition to the end of this channel's data so a decode failure
   cannot desynchronize the parse. */
(void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2, SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) (void) DestroyImage(mask);
ThrowBinaryException(CoderError,"UnableToDecompressImage", image->filename); } if (mask != (Image *) NULL) { if (layer_info->mask.image != (Image *) NULL) layer_info->mask.image=DestroyImage(layer_info->mask.image); layer_info->mask.image=mask; } return(status); }
/*
  Read every channel of one layer into layer_info->image, set its compose
  operator from the blend key, record page/opacity artifacts, apply the
  layer opacity, and composite the opacity mask when present.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info, const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception) { char message[MagickPathExtent]; MagickBooleanType status; PSDCompressionType compression; ssize_t j; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " setting up new layer image"); if (psd_info->mode != IndexedMode) (void) SetImageBackgroundColor(layer_info->image,exception); layer_info->image->compose=PSDBlendModeToCompositeOperator( layer_info->blendkey); if (layer_info->visible == MagickFalse) layer_info->image->compose=NoCompositeOp; /* Set up some hidden attributes for folks that need them. */ (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.x); (void) SetImageArtifact(layer_info->image,"psd:layer.x",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.y); (void) SetImageArtifact(layer_info->image,"psd:layer.y",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double) layer_info->opacity); (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message); (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name, exception); status=MagickTrue; for (j=0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for channel %.20g",(double) j); compression=(PSDCompressionType) ReadBlobShort(layer_info->image); layer_info->image->compression=ConvertPSDCompression(compression); if (layer_info->channel_info[j].type == -1) layer_info->image->alpha_trait=BlendPixelTrait;
status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info, (size_t) j,compression,exception); if (status == MagickFalse) break; } if (status != MagickFalse) status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity, MagickFalse,exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status=NegateCMYK(layer_info->image,exception); if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL)) { const char *option; layer_info->mask.image->page.x=layer_info->mask.page.x; layer_info->mask.image->page.y=layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose=NoCompositeOp; else status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image, layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse, exception); option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image,layer_info,exception); layer_info->mask.image=DestroyImage(layer_info->mask.image); } return(status); }
/*
  Verify that a layer's channel list supplies every channel the color mode
  requires (1 for gray/indexed, 3 for RGB, 4 for CMYK); an alpha channel or
  mask channels may appear in addition.  Returns MagickFalse on a malformed
  channel set.
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info, LayerInfo *layer_info) { int channel_type; ssize_t i; if (layer_info->channels < psd_info->min_channels) return(MagickFalse); channel_type=RedChannel; if (psd_info->min_channels >= 3) channel_type|=(GreenChannel | BlueChannel); if (psd_info->min_channels >= 4) channel_type|=BlackChannel; for (i=0; i < (ssize_t) layer_info->channels; i++) { short type; type=layer_info->channel_info[i].type; if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0)) return(MagickFalse); if (type == -1) { channel_type|=AlphaChannel; continue; } if (type < -1) continue; if (type == 0) channel_type&=~RedChannel; else if (type == 1) channel_type&=~GreenChannel; else if (type == 2) channel_type&=~BlueChannel; else if (type == 3) channel_type&=~BlackChannel; } if (channel_type == 0) return(MagickTrue); if
((channel_type == AlphaChannel) && (layer_info->channels >= psd_info->min_channels + 1)) return(MagickTrue); return(MagickFalse); }
/*
  Compact out entries whose image could not be created, then link the
  remaining layer images into the image list after `image`.  Ownership of
  layer_info is consumed (freed here).
*/
static void AttachPSDLayers(Image *image,LayerInfo *layer_info, ssize_t number_layers) { ssize_t i; ssize_t j; for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers == 0) { layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); return; } for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); }
/*
  True when scene selection (image_info->scene / number_scenes) excludes
  this layer index and a merged composite exists to satisfy the request.
*/
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info, const ImageInfo *image_info,const size_t index) { if (psd_info->has_merged_image == MagickFalse) return(MagickFalse); if (image_info->number_scenes == 0) return(MagickFalse); if (index < image_info->scene) return(MagickTrue); if (index > image_info->scene+image_info->number_scenes-1) return(MagickTrue); return(MagickFalse); } static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image) { /* The number of layers cannot be used to determine if the merged image contains an alpha channel. So we enable it when we think we should.
*/ if (((psd_info->mode == GrayscaleMode) && (psd_info->channels > 1)) || ((psd_info->mode == RGBMode) && (psd_info->channels > 3)) || ((psd_info->mode == CMYKMode) && (psd_info->channels > 4))) image->alpha_trait=BlendPixelTrait; }
/*
  Scan a layer's additional-info blocks ("8BIM"/key/length records).
  Currently only "luni" (Unicode layer name) is parsed, and only ASCII
  characters are kept; any non-ASCII character truncates the name.
*/
static void ParseAdditionalInfo(LayerInfo *layer_info) { char key[5]; size_t remaining_length; unsigned char *p; unsigned int size; p=GetStringInfoDatum(layer_info->info); remaining_length=GetStringInfoLength(layer_info->info); while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(char) (*p++); key[1]=(char) (*p++); key[2]=(char) (*p++); key[3]=(char) (*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) break; if (LocaleNCompare(key,"luni",sizeof(key)) == 0) { unsigned char *name; unsigned int length; length=(unsigned int) (*p++) << 24; length|=(unsigned int) (*p++) << 16; length|=(unsigned int) (*p++) << 8; length|=(unsigned int) (*p++); if (length * 2 > size - 4) break; if (sizeof(layer_info->name) <= length) break; name=layer_info->name; while (length > 0) { /* Only ASCII strings are supported */ if (*p++ != '\0') break; *name++=*p++; length--; } if (length == 0) *name='\0'; break; } else p+=size; remaining_length-=(size_t) size; } }
/*
  Return the byte length of the layer-info section.  When the size field is
  zero the section may be wrapped in global additional-info blocks: Mt16/
  Mt32/Mtrn (merged-image transparency, which implies an alpha channel) and
  Lr16/Lr32 (16/32-bit layer data) are unwrapped to reach the real size.
*/
static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image) { char type[4]; MagickSizeType size; ssize_t count; size=GetPSDSize(psd_info,image); if (size != 0) return(size); (void) ReadBlobLong(image); count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(0); count=ReadPSDString(image,type,4); if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) || (LocaleNCompare(type,"Mt32",4) == 0) || (LocaleNCompare(type,"Mtrn",4) == 0))) { size=GetPSDSize(psd_info,image); if (size != 0) return(0); image->alpha_trait=BlendPixelTrait;
count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(0); count=ReadPSDString(image,type,4); } if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) || (LocaleNCompare(type,"Lr32",4) == 0))) size=GetPSDSize(psd_info,image); return(size); }
/*
  Parse the layer-info section: layer count, then per-layer records
  (bounds, channels, blend key, opacity, flags, mask info, name, additional
  info), then each layer's channel data.  When skip_layers is set only the
  alpha-channel implications are recorded.
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; ssize_t i; ssize_t count, index, j, number_layers; size=GetLayerInfoSize(psd_info,image); if (size == 0) { CheckMergedImageAlpha(psd_info,image); return(MagickTrue); } layer_info=(LayerInfo *) NULL; number_layers=(ssize_t) ReadBlobSignedShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. */ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->alpha_trait=BlendPixelTrait; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t top, left, bottom, right; if
(image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1);
/* Layer bounds are stored top/left/bottom/right; reject inverted boxes. */
top=(ssize_t) ReadBlobSignedLong(image); left=(ssize_t) ReadBlobSignedLong(image); bottom=(ssize_t) ReadBlobSignedLong(image); right=(ssize_t) ReadBlobSignedLong(image); if ((right < left) || (bottom < top)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].page.y=top; layer_info[i].page.x=left; layer_info[i].page.width=(size_t) (right-left); layer_info[i].page.height=(size_t) (bottom-top); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels);
/* Per-channel records: type id (-4..4) plus the channel data byte count. */
for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); if ((layer_info[i].channel_info[j].type < -4) || (layer_info[i].channel_info[j].type > 4)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"NoSuchImageChannel", image->filename); } layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename); } count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadPSDString(image,layer_info[i].blendkey,4); if (count != 4) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image);
/* Flag bit 0x02 means the layer is hidden. */
layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info.
*/ layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) (ReadBlobSignedLong(image)-layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) ( ReadBlobSignedLong(image)-layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
/* Mask flag 0x01: position is relative to the layer; otherwise convert
   absolute coordinates to layer-relative. */
if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,(double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } /* Layer name.
*/ length=(MagickSizeType) (unsigned char) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name);
/* The Pascal-style name is padded to a multiple of 4 bytes. */
if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } }
/* Whatever remains of this layer record is additional info (parsed for
   the Unicode layer name in ParseAdditionalInfo). */
length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; if (length > GetBlobSize(image)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "InsufficientImageDataInFile",image->filename); } layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); ParseAdditionalInfo(&layer_info[i]); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image.
*/ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info,exception); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping != MagickFalse) { AttachPSDLayers(image,layer_info,number_layers); return(MagickTrue); } status=MagickTrue; index=0;
/* Read (or skip) each layer's channel data. */
for (i=0; i < number_layers; i++) { if ((layer_info[i].image == (Image *) NULL) || (PSDSkipImage(psd_info, image_info,++index) != MagickFalse)) { for (j=0; j < (ssize_t) layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, (MagickSizeType) number_layers); if (status == MagickFalse) break; } if (status != MagickFalse) AttachPSDLayers(image,layer_info,number_layers); else layer_info=DestroyLayerInfo(layer_info,number_layers); return(status); }
/*
  Public entry: honour the coder security policy, then read all layers.
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { MagickBooleanType status;
/* A policy denial is reported as success so the merged image (read
   elsewhere) can still satisfy the request. */
status=IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD"); if (status == MagickFalse) return(MagickTrue); return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse, exception)); }
/*
  Read the flattened (merged) composite at the end of the file: one RAW or
  RLE compressed plane per channel.  For two-channel grayscale files the
  second plane is treated as alpha (type -1).
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info, Image *image,const PSDInfo *psd_info,ExceptionInfo *exception) { MagickOffsetType *sizes; MagickBooleanType status; PSDCompressionType compression; ssize_t i; if ((image_info->number_scenes != 0) && (image_info->scene != 0)) return(MagickTrue); compression=(PSDCompressionType) ReadBlobMSBShort(image); image->compression=ConvertPSDCompression(compression); if (compression != Raw && compression != RLE) { (void) ThrowMagickException(exception,GetMagickModule(), TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression); return(MagickFalse); } sizes=(MagickOffsetType *) NULL; if (compression == RLE) {
/* RLE scanline counts for all channels precede the channel data. */
sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } status=MagickTrue; for (i=0; i < (ssize_t) psd_info->channels; i++) { ssize_t type; type=i; if ((type == 1) && (psd_info->channels == 2)) type=-1; if (compression == RLE) status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows), exception); else status=ReadPSDChannelRaw(image,psd_info->channels,type,exception); if (status != MagickFalse) status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, psd_info->channels); if (status == MagickFalse) break; } if ((status != MagickFalse) && (image->colorspace == CMYKColorspace)) status=NegateCMYK(image,exception); if (status != MagickFalse) status=CorrectPSDAlphaBlend(image_info,image,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); return(status); }
/*
  Top-level PSD/PSB reader: validate the header, set up colorspace and
  colormap, then parse the resource blocks, layers and merged image.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType skip_layers; MagickOffsetType offset; MagickSizeType length;
MagickBooleanType status; PSDInfo psd_info; ssize_t i; size_t image_list_length; ssize_t count; StringInfo *profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read image header. */ image->endian=MSBEndian; count=ReadBlob(image,4,(unsigned char *) psd_info.signature); psd_info.version=ReadBlobMSBShort(image); if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlob(image,6,psd_info.reserved); psd_info.channels=ReadBlobMSBShort(image); if (psd_info.channels < 1) ThrowReaderException(CorruptImageError,"MissingImageChannel"); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); psd_info.rows=ReadBlobMSBLong(image); psd_info.columns=ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.depth=ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16) && (psd_info.depth != 32)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.mode=ReadBlobMSBShort(image); if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image is %.20g x %.20g with
channels=%.20g, depth=%.20g, mode=%s", (double) psd_info.columns,(double) psd_info.rows,(double) psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType) psd_info.mode)); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize image. */ image->depth=psd_info.depth; image->columns=psd_info.columns; image->rows=psd_info.rows; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); psd_info.min_channels=3;
/* Map the PSD color mode to a colorspace and the minimum channel count
   that mode requires. */
switch (psd_info.mode) { case LabMode: { (void) SetImageColorspace(image,LabColorspace,exception); break; } case CMYKMode: { psd_info.min_channels=4; (void) SetImageColorspace(image,CMYKColorspace,exception); break; } case BitmapMode: case GrayscaleMode: case DuotoneMode: { if (psd_info.depth != 32) { status=AcquireImageColormap(image,MagickMin((size_t) (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image colormap allocated"); } psd_info.min_channels=1; (void) SetImageColorspace(image,GRAYColorspace,exception); break; } case IndexedMode: { psd_info.min_channels=1; break; } case MultichannelMode: { if ((psd_info.channels > 0) && (psd_info.channels < 3)) { psd_info.min_channels=psd_info.channels; (void) SetImageColorspace(image,GRAYColorspace,exception); } break; } } if (psd_info.channels < psd_info.min_channels) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Read PSD raster colormap only present for indexed and duotone images.
*/ length=ReadBlobMSBLong(image); if ((psd_info.mode == IndexedMode) && (length < 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (length != 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading colormap"); if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32)) { /* Duotone image data; the format of this data is undocumented. 32 bits per pixel; the colormap is ignored. */ (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR); } else { size_t number_colors; /* Read PSD raster colormap. */ number_colors=(size_t) length/3; if (number_colors > 65536) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireImageColormap(image,number_colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
/* The colormap is stored as three planes: all reds, all greens, all blues. */
for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].red=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].green=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); image->alpha_trait=UndefinedPixelTrait; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); psd_info.has_merged_image=MagickTrue; profile=(StringInfo *) NULL; length=ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* Image resources block.
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading image resource blocks - %.20g bytes",(double) ((MagickOffsetType) length)); if (length > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); blocks=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,(size_t) length,blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *) blocks,"8BIM",4) != 0)) { blocks=(unsigned char *) RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length); blocks=(unsigned char *) RelinquishMagickMemory(blocks); } /* Layer and mask block. */ length=GetPSDSize(&psd_info,image); if (length == 8) { length=ReadBlobMSBLong(image); length=ReadBlobMSBLong(image); } offset=TellBlob(image); skip_layers=MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (psd_info.has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " read composite only"); skip_layers=MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers, exception) != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } /* Skip the rest of the layer and mask information. */ (void) SeekBlob(image,offset+length,SEEK_SET); } /* If we are only "pinging" the image, then we're done - so return. 
*/ if (EOFBlob(image) != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); } if (image_info->ping != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Read the precombined layer, present for PSD < 4 compatibility. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading the precombined layer"); image_list_length=GetImageListLength(image); if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1)) psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage( image_info,image,&psd_info,exception); if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) && (length != 0)) { (void) SeekBlob(image,offset,SEEK_SET); status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse, exception); if (status != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } image_list_length=GetImageListLength(image); } if (psd_info.has_merged_image == MagickFalse) { Image *merged; if (image_list_length == 1) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); } image->background_color.alpha=(MagickRealType) TransparentAlpha; image->background_color.alpha_trait=BlendPixelTrait; (void) SetImageBackgroundColor(image,exception); merged=MergeImageLayers(image,FlattenLayer,exception); if (merged == (Image *) NULL) { (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } ReplaceImageInList(&image,merged); } if (profile != (StringInfo *) NULL) { Image *next; i=0; next=image; while (next != (Image *) NULL) { if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse) (void) 
SetImageProfile(next,GetStringInfoName(profile),profile, exception); next=next->next; } profile=DestroyStringInfo(profile); } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPSDImage() adds properties for the PSD image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterPSDImage method is: % % size_t RegisterPSDImage(void) % */ ModuleExport size_t RegisterPSDImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPSDImage() removes format registrations made by the % PSD module from the list of supported 
formats.
%
%  The format of the UnregisterPSDImage method is:
%
%      UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e P S D I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
%  The format of the WritePSDImage method is:
%
%      MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image:  The image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Write a scanline byte-count placeholder: 2 bytes in PSD (version 1),
   4 bytes in PSB (version 2). */
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBShort(image,(unsigned short) offset));
  return(WriteBlobMSBLong(image,(unsigned int) offset));
}

/* Back-patch a previously written scanline byte count at `offset`, then
   restore the current blob position. */
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBShort(image,(unsigned short) size);
  else
    result=WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

/* Write a section length field: 4 bytes in PSD (version 1), 8 bytes in
   PSB (version 2). */
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version == 1)
    return(WriteBlobLong(image,(unsigned int) size));
  return(WriteBlobLongLong(image,size));
}

/* Back-patch a section length at `offset` (via SetPSDSize), then restore
   the current blob position.  (Definition continues past this range.) */
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  (void)
SeekBlob(image,offset,SEEK_SET);
  result=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

/*
  PSDPackbitsEncodeImage() compresses one scanline of `length` bytes from
  `pixels` into `compact_pixels` using PackBits RLE as used by PSD:
  a header byte of 257-n introduces a run of n identical bytes, a header
  byte of n-1 introduces n literal bytes.  Returns the number of bytes
  written to `compact_pixels`.
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  ssize_t
    i,
    j;

  unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* Scratch buffer for one literal packet (header + up to 127 bytes). */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* Single trailing byte: emit as a one-byte literal packet. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two trailing bytes: emit as a two-byte literal packet. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* Three identical trailing bytes: emit a run of 3. */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run: extend up to 127 bytes while the value repeats.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run: collect bytes until three identical bytes are seen
          (start of a run), the remaining count is nearly exhausted, or the
          127-byte packet limit is reached.
        */
        count=0;
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}

/*
  WriteCompressionStart() writes the 2-byte compression tag for a channel
  data section and, for RLE, a zeroed byte-count placeholder for every row
  of every channel (patched later via WritePSDOffset).  Returns the number
  of bytes written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    length;

  ssize_t
    i,
    y;

  if (compression == RLECompression)
    {
      length=(size_t) WriteBlobShort(image,RLE);
      for (i=0; i < channels; i++)
        for (y=0; y < (ssize_t) next_image->rows; y++)
          length+=SetPSDOffset(psd_info,image,0);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  else if (compression == ZipCompression)
    length=(size_t) WriteBlobShort(image,ZipWithoutPrediction);
#endif
  else
    length=(size_t) WriteBlobShort(image,Raw);
  return(length);
}

/*
  WritePSDChannel() writes one channel of `next_image` using the requested
  compression.  (Definition continues past this range.)
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  const Quantum
    *p;

  ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* Each separate channel carries its own compression tag; remember
         where its size placeholders begin. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse; quantum_info=AcquireQuantumInfo(image_info,next_image); if (quantum_info == (QuantumInfo *) NULL) return(0); pixels=(unsigned char *) GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { compressed_pixels=(unsigned char *) AcquireQuantumMemory( MagickMinBufferExtent,sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *) NULL) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; level=Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level=(int) image_info->quality; if (deflateInit(&stream,level) != Z_OK) { quantum_info=DestroyQuantumInfo(quantum_info); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); return(0); } } #endif for (y=0; y < (ssize_t) next_image->rows; y++) { p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (monochrome != MagickFalse) for (i=0; i < (ssize_t) length; i++) pixels[i]=(~pixels[i]); if (compression == RLECompression) { length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels, exception); count+=WriteBlob(image,length,compact_pixels); size_offset+=WritePSDOffset(psd_info,image,length,size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) { stream.avail_in=(uInt) length; stream.next_in=(Bytef *) pixels; if (y == (ssize_t) next_image->rows-1) flush=Z_FINISH; do { stream.avail_out=(uInt) MagickMinBufferExtent; stream.next_out=(Bytef *) compressed_pixels; if (deflate(&stream,flush) == Z_STREAM_ERROR) break; length=(size_t) MagickMinBufferExtent-stream.avail_out; if (length > 0) count+=WriteBlob(image,length,compressed_pixels); } while (stream.avail_out == 0); } #endif else count+=WriteBlob(image,length,pixels); } #ifdef 
MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { (void) deflateEnd(&stream); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); } #endif quantum_info=DestroyQuantumInfo(quantum_info); return(count); } static unsigned char *AcquireCompactPixels(const Image *image, ExceptionInfo *exception) { size_t packet_size; unsigned char *compact_pixels; packet_size=image->depth > 8UL ? 2UL : 1UL; compact_pixels=(unsigned char *) AcquireQuantumMemory((9* image->columns)+1,packet_size*sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); } return(compact_pixels); } static size_t WritePSDChannels(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, MagickOffsetType size_offset,const MagickBooleanType separate, ExceptionInfo *exception) { CompressionType compression; Image *mask; MagickOffsetType rows_offset; size_t channels, count, length, offset_length; unsigned char *compact_pixels; count=0; offset_length=0; rows_offset=0; compact_pixels=(unsigned char *) NULL; compression=next_image->compression; if (image_info->compression != UndefinedCompression) compression=image_info->compression; if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(next_image,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } channels=1; if (separate == MagickFalse) { if ((next_image->storage_class != PseudoClass) || (IsImageGray(next_image) != MagickFalse)) { if (IsImageGray(next_image) == MagickFalse) channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 : 3); if (next_image->alpha_trait != UndefinedPixelTrait) channels++; } rows_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,compression, (ssize_t) channels); offset_length=(next_image->rows*(psd_info->version == 1 ? 
2 : 4)); } size_offset+=2; if ((next_image->storage_class == PseudoClass) && (IsImageGray(next_image) == MagickFalse)) { length=WritePSDChannel(psd_info,image_info,image,next_image, IndexQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (IsImageGray(next_image) != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, GrayQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); length=WritePSDChannel(psd_info,image_info,image,next_image, RedQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, GreenQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, BlueQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; if (next_image->colorspace == CMYKColorspace) { length=WritePSDChannel(psd_info,image_info,image,next_image, BlackQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } if 
(next_image->alpha_trait != UndefinedPixelTrait) { length=WritePSDChannel(psd_info,image_info,image,next_image, AlphaQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); if (separate != MagickFalse) { const char *property; property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, exception); if (mask != (Image *) NULL) { if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(mask,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } length=WritePSDChannel(psd_info,image_info,image,mask, RedQuantum,compact_pixels,rows_offset,MagickTrue,compression, exception); (void) WritePSDSize(psd_info,image,length,size_offset); count+=length; compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); } } } return(count); } static size_t WritePascalString(Image *image,const char *value,size_t padding) { size_t count, length; ssize_t i; /* Max length is 255. */ count=0; length=(strlen(value) > 255UL ) ? 
255UL : strlen(value);
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  /* Pad the string (including its length byte) to a multiple of `padding`. */
  length++;
  if ((length % padding) == 0)
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}

/*
  WriteResolutionResourceBlock() writes the 8BIM resolution image resource
  (id 0x03ED, 16-byte payload): horizontal and vertical resolution as 16.16
  fixed point, each followed by its display and dimension unit shorts
  (1 = inches, 2 = cm).
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16);  /* resource size */
  /* NOTE(review): 0.5 was already added when x_resolution/y_resolution were
     computed above, so the rounding bias is applied twice here -- confirm
     whether this double rounding is intentional. */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}

/* Write a channel record header: channel id short plus a zeroed length
   placeholder (patched later via WritePSDSize). */
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    count;

  count=(size_t) WriteBlobShort(image,(const unsigned short) channel);
  count+=SetPSDSize(psd_info,image,0);
  return(count);
}

/*
  RemoveICCProfileFromResourceBlock() deletes the ICC profile resource
  (id 0x040F) from an 8BIM resource-block profile in place, shrinking the
  StringInfo accordingly.  (Definition continues past this range.)
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *)
p,"8BIM",4) != 0)
      break;
    /* Parse the resource header: 4-byte signature (above), 2-byte id,
       Pascal-style name (read as two shorts here), 4-byte data size. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* Remove the ICC profile entry: shift the remainder down over the
           padded entry (header 12 bytes + even-padded data). */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;  /* resource data is padded to an even byte count */
  }
}

/*
  RemoveResolutionFromResourceBlock() deletes the resolution resource
  (id 0x03ED) from an 8BIM resource-block profile in place, shrinking the
  StringInfo accordingly (so WriteResolutionResourceBlock can emit a fresh
  one without duplication -- verify against the caller).
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        /* Shift the remaining resources down over this entry. */
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;  /* resource data is padded to an even byte count */
  }
}

/*
  GetAdditionalInformation() filters the "psd:additional-info" profile
  according to the image option of the same name: "all" keeps everything,
  anything other than "selective" drops the profile, and "selective" keeps
  only the whitelisted keys below.  (Definition continues past this range.)
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /*
    Whitelist of keys from:
    https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/
  */
  const char
    allowed[PSDAllowedLength][PSDKeySize] =
  {
    "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
    "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
    "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
    "post",
"PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo *info; MagickBooleanType found; size_t i; size_t remaining_length, length; StringInfo *profile; unsigned char *p; unsigned int size; info=GetImageProfile(image,"psd:additional-info"); if (info == (const StringInfo *) NULL) return((const StringInfo *) NULL); option=GetImageOption(image_info,"psd:additional-info"); if (LocaleCompare(option,"all") == 0) return(info); if (LocaleCompare(option,"selective") != 0) { profile=RemoveImageProfile(image,"psd:additional-info"); return(DestroyStringInfo(profile)); } length=GetStringInfoLength(info); p=GetStringInfoDatum(info); remaining_length=length; length=0; while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(char) (*p++); key[1]=(char) (*p++); key[2]=(char) (*p++); key[3]=(char) (*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) return((const StringInfo *) NULL); found=MagickFalse; for (i=0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0) continue; found=MagickTrue; break; } remaining_length-=(size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p=(unsigned char *) memmove(p-12,p+size,remaining_length); continue; } length+=(size_t) size+12; p+=size; } profile=RemoveImageProfile(image,"psd:additional-info"); if (length == 0) return(DestroyStringInfo(profile)); SetStringInfoLength(profile,(const size_t) length); (void) SetImageProfile(image,"psd:additional-info",info,exception); return(profile); } static MagickBooleanType WritePSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size, ExceptionInfo *exception) { char layer_name[MagickPathExtent]; const char *property; const StringInfo *info; Image *base_image, *next_image; MagickBooleanType 
status; MagickOffsetType *layer_size_offsets, size_offset; ssize_t i; size_t layer_count, layer_index, length, name_length, rounded_size, size; status=MagickTrue; base_image=GetNextImageInList(image); if (base_image == (Image *) NULL) base_image=image; size=0; size_offset=TellBlob(image); (void) SetPSDSize(psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->alpha_trait != UndefinedPixelTrait) size+=WriteBlobShort(image,-(unsigned short) layer_count); else size+=WriteBlobShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for (next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception); default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0); } size+=WriteBlobSignedLong(image,(signed int) next_image->page.y); size+=WriteBlobSignedLong(image,(signed int) next_image->page.x); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+ next_image->rows)); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+ next_image->columns)); channels=1; if ((next_image->storage_class != PseudoClass) && (IsImageGray(next_image) == MagickFalse)) channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 
4 : 3); total_channels=channels; if (next_image->alpha_trait != UndefinedPixelTrait) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(psd_info,image,(signed short) i); if (next_image->alpha_trait != UndefinedPixelTrait) size+=WriteChannelSize(psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(psd_info,image,-2); size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM"); size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,(const unsigned char) (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. 
*/ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image,exception); property=(const char *) GetImageProperty(next_image,"label",exception); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobLong(image,20); size+=WriteBlobSignedLong(image,(const signed int) mask->page.y); size+=WriteBlobSignedLong(image,(const signed int) mask->page.x); size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+ mask->page.y)); size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+ mask->page.x)); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,(const unsigned char) (mask->compose == NoCompositeOp ? 2 : 0)); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info), GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! 
*/ next_image=base_image; layer_index=0; while (next_image != NULL) { length=WritePSDChannels(psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue,exception); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } /* Write the total size */ if (layers_size != (size_t*) NULL) *layers_size=size; if ((size/2) != ((size+1)/2)) rounded_size=size+1; else rounded_size=size; (void) WritePSDSize(psd_info,image,rounded_size,size_offset); layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) (void) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } return(status); } ModuleExport MagickBooleanType WritePSDLayers(Image * image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { MagickBooleanType status; status=IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD"); if (status == MagickFalse) return(MagickTrue); return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL, exception); } static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const StringInfo *icc_profile; MagickBooleanType status; PSDInfo psd_info; ssize_t i; size_t length, num_channels; StringInfo *bim_profile; /* Open image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) && (SetImageGray(image,exception) != MagickFalse)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType) && (image->storage_class == PseudoClass)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass,exception); if (image->colorspace != CMYKColorspace) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL); else num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 
16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace,exception); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace,exception); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. */ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].red))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].green))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].blue))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. 
*/ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } if (status != MagickFalse) { const char *option; CompressionType compression; MagickOffsetType size_offset; size_t size; size_offset=TellBlob(image); (void) SetPSDSize(&psd_info,image,0); option=GetImageOption(image_info,"psd:write-layers"); if (IsStringFalse(option) != MagickTrue) { status=WritePSDLayersInternal(image,image_info,&psd_info,&size, exception); (void) WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 8 : 12),size_offset); (void) WriteBlobMSBLong(image,0); /* user mask data */ } /* Write composite image. 
*/ compression=image->compression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse, exception) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
datascope_parallel.c
#include <stdio.h>

/* File-scope (static storage duration) counter: shared by every thread
   in the parallel region. */
int static_data = 0;

/*
 * OpenMP data-scoping demo: `local` is private to each thread,
 * `dynamic_data` (automatic in main) and `static_data` (file scope)
 * are shared by the whole team.
 *
 * Fixes versus the original:
 *  - the loop counter was declared outside the parallel region and was
 *    therefore SHARED — a data race on the loop index (undefined
 *    behavior); it is now declared in the for-statement, so each thread
 *    owns its own copy;
 *  - the increments of the shared counters were unsynchronized (racy);
 *    they are now `#pragma omp atomic`, so every thread's 1000
 *    increments are observed.
 *
 * Returns 0 on success.
 */
int main() {
  int dynamic_data = 0;
#pragma omp parallel
  { //start parallel
    int local = 0; /* private: declared inside the parallel region */
    for (int i = 0; i < 1000; i++) {
      local += 1;
#pragma omp atomic
      dynamic_data += 1;
#pragma omp atomic
      static_data += 1;
    }
    printf("localの値=%d\n", local);
  } //end parallel
  printf("dynamic_dataの値=%d\n", dynamic_data);
  printf("static_dataの値=%d\n", static_data);
  return 0;
}
diagmm_x_dia_u_col.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Dense update y := alpha * x + beta * y, applied column by column over
 * `columns` columns of length mat->rows, with leading dimensions ldx/ldy.
 *
 * NOTE(review): mat->values is never read — presumably correct because this
 * kernel handles the unit-diagonal DIA case, where A*x reduces to x; confirm
 * against the dispatcher that selects this kernel.
 *
 * Columns are independent (each (col,row) element of y is written exactly
 * once), so the outer loop parallelizes without synchronization.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    const ALPHA_INT rows = mat->rows;
    ALPHA_INT num_threads = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT col = 0; col < columns; ++col)
    {
        for (ALPHA_INT row = 0; row < rows; ++row)
        {
            const ALPHA_INT iy = index2(col, row, ldy);
            const ALPHA_INT ix = index2(col, row, ldx);
            alpha_mul(y[iy], y[iy], beta);     /* y *= beta        */
            alpha_madde(y[iy], x[ix], alpha);  /* y += alpha * x   */
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
compare.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP AAA RRRR EEEEE % % C O O MM MM P P A A R R E % % C O O M M M PPPP AAAAA RRRR EEE % % C O O M M P A A R R E % % CCCC OOO M M P A A R R EEEEE % % % % % % MagickCore Image Comparison Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/compare.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/statistic.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p a r e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompareImages() compares one or more pixel channels of an image to a % reconstructed image and returns the difference image. % % The format of the CompareImages method is: % % Image *CompareImages(const Image *image,const Image *reconstruct_image, % const MetricType metric,double *distortion,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o distortion: the computed distortion between the images. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  GetImageChannels() counts the pixel channels whose traits include
  UpdatePixelTrait, i.e. the channels the distortion metrics accumulate
  into.  Never returns 0 — the result is used as a divisor by callers.
*/
static size_t GetImageChannels(const Image *image)
{
  ssize_t
    i;

  size_t
    channels;

  channels=0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits & UpdatePixelTrait) != 0)
      channels++;
  }
  /* Clamp to 1 so the result is always a safe divisor. */
  return(channels == 0 ? (size_t) 1 : channels);
}

MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *highlight_view,
    *image_view,
    *reconstruct_view;

  const char
    *artifact;

  double
    fuzz;

  Image
    *clone_image,
    *difference_image,
    *highlight_image;

  MagickBooleanType
    status;

  PixelInfo
    highlight,
    lowlight,
    masklight;

  RectangleInfo
    geometry;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Compute the requested metric first; fail before allocating any images. */
  status=GetImageDistortion(image,reconstruct_image,metric,distortion,
    exception);
  if (status == MagickFalse)
    return((Image *) NULL);
  /* The difference image spans the larger of the two image geometries. */
  columns=MagickMax(image->columns,reconstruct_image->columns);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  SetGeometry(image,&geometry);
  geometry.width=columns;
  geometry.height=rows;
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
  difference_image=ExtentImage(clone_image,&geometry,exception);
  clone_image=DestroyImage(clone_image);
  if (difference_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
  highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (highlight_image == (Image *) NULL)
    {
      difference_image=DestroyImage(difference_image);
      return((Image *) NULL);
    }
  status=SetImageStorageClass(highlight_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      difference_image=DestroyImage(difference_image);
      highlight_image=DestroyImage(highlight_image);
      return((Image *) NULL);
    }
  (void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
  (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
  /*
    Colors used to paint differing, matching, and masked pixels; each default
    can be overridden through a "compare:*-color" image artifact.
  */
  (void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
  artifact=GetImageArtifact(image,"compare:highlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
  (void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
  artifact=GetImageArtifact(image,"compare:lowlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
  (void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
  artifact=GetImageArtifact(image,"compare:masklight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
  /*
    Generate difference image.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,highlight_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    Quantum
      *magick_restrict r;

    ssize_t
      x;

    /* Another row already failed; skip the remaining work cheaply. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickStatusType
        difference;

      ssize_t
        i;

      /* Read-masked pixels are painted masklight and otherwise ignored. */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          SetPixelViaPixelInfo(highlight_image,&masklight,r);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          r+=GetPixelChannels(highlight_image);
          continue;
        }
      difference=MagickFalse;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance,
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* Alpha is compared directly; color channels are alpha-weighted. */
        if (channel == AlphaPixelChannel)
          pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        distance=pixel*pixel;
        /* One channel past the fuzz threshold marks the pixel different. */
        if (distance >= fuzz)
          {
            difference=MagickTrue;
            break;
          }
      }
      if (difference == MagickFalse)
        SetPixelViaPixelInfo(highlight_image,&lowlight,r);
      else
        SetPixelViaPixelInfo(highlight_image,&highlight,r);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
      r+=GetPixelChannels(highlight_image);
    }
    sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  highlight_view=DestroyCacheView(highlight_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Compose the highlight overlay onto the extended difference canvas. */
  (void) CompositeImage(difference_image,highlight_image,image->compose,
    MagickTrue,0,0,exception);
  highlight_image=DestroyImage(highlight_image);
  if (status == MagickFalse)
    difference_image=DestroyImage(difference_image);
  return(difference_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e D i s t o r t i o n                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDistortion() compares one or more pixel channels of an image to a
%  reconstructed image and returns the specified distortion metric.
%
%  The format of the GetImageDistortion method is:
%
%      MagickBooleanType GetImageDistortion(const Image *image,
%        const Image *reconstruct_image,const MetricType metric,
%        double *distortion,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o reconstruct_image: the reconstruct image.
%
%    o metric: the metric.
%
%    o distortion: the computed distortion between the images.
%
%    o exception: return any errors or warnings in this structure.
% */ static MagickBooleanType GetAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double fuzz; MagickBooleanType status; size_t columns, rows; ssize_t y; /* Compute the absolute difference in pixels between two images. */ status=MagickTrue; fuzz=GetFuzzyColorDistance(image,reconstruct_image); rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; const Quantum *magick_restrict p, *magick_restrict q; ssize_t j, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; MagickBooleanType difference; ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } difference=MagickFalse; Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance, pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == 
UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q); else pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q); distance=pixel*pixel; if (distance >= fuzz) { channel_distortion[i]++; difference=MagickTrue; } } if (difference != MagickFalse) channel_distortion[CompositePixelChannel]++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetAbsoluteDistortion) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static MagickBooleanType GetFuzzDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; const Quantum *magick_restrict p, *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } (void) 
memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image, channel,q)); else distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); channel_distortion[i]+=distance*distance; channel_distortion[CompositePixelChannel]+=distance*distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetFuzzDistortion) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=(double) GetImageChannels(image); distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]); return(status); } static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; ssize_t j; size_t columns, rows; 
ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; const Quantum *magick_restrict p, *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=QuantumScale*fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,channel,q))); else distance=QuantumScale*fabs((double) (Sa*p[i]-Da* GetPixelChannel(reconstruct_image,channel,q))); channel_distortion[i]+=distance; 
channel_distortion[CompositePixelChannel]+=distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanAbsoluteError) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=(double) GetImageChannels(image); return(status); } static MagickBooleanType GetMeanErrorPerPixel(Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; double area, maximum_error, mean_error; size_t columns, rows; ssize_t y; status=MagickTrue; area=0.0; maximum_error=0.0; mean_error=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { const Quantum *magick_restrict p, *magick_restrict q; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); 
PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,channel,q))); else distance=fabs((double) (Sa*p[i]-Da* GetPixelChannel(reconstruct_image,channel,q))); distortion[i]+=distance; distortion[CompositePixelChannel]+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; area++; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=distortion[CompositePixelChannel]/area; image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error/area; image->error.normalized_maximum_error=QuantumScale*maximum_error; return(status); } static MagickBooleanType GetMeanSquaredDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area; MagickBooleanType status; ssize_t j; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) reduction(+:area) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; const Quantum *magick_restrict p, *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; 
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; ssize_t i; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image, channel,q)); else distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image, channel,q)); channel_distortion[i]+=distance*distance; channel_distortion[CompositePixelChannel]+=distance*distance; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetMeanSquaredError) #endif for (j=0; j <= MaxPixelChannels; j++) distortion[j]+=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); area=PerceptibleReciprocal(area); for (j=0; j <= MaxPixelChannels; j++) distortion[j]*=area; distortion[CompositePixelChannel]/=GetImageChannels(image); return(status); } static MagickBooleanType GetNormalizedCrossCorrelationDistortion( const Image *image,const Image 
*reconstruct_image,double *distortion, ExceptionInfo *exception) { #define SimilarityImageTag "Similarity/Image" CacheView *image_view, *reconstruct_view; ChannelStatistics *image_statistics, *reconstruct_statistics; double area; MagickBooleanType status; MagickOffsetType progress; ssize_t i; size_t columns, rows; ssize_t y; /* Normalize to account for variation due to lighting and exposure condition. */ image_statistics=GetImageStatistics(image,exception); reconstruct_statistics=GetImageStatistics(reconstruct_image,exception); if ((image_statistics == (ChannelStatistics *) NULL) || (reconstruct_statistics == (ChannelStatistics *) NULL)) { if (image_statistics != (ChannelStatistics *) NULL) image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); if (reconstruct_statistics != (ChannelStatistics *) NULL) reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); return(MagickFalse); } status=MagickTrue; progress=0; for (i=0; i <= MaxPixelChannels; i++) distortion[i]=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); area=0.0; image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { const Quantum *magick_restrict p, *magick_restrict q; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } area++; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } } area=PerceptibleReciprocal(area); for (y=0; y < 
(ssize_t) rows; y++) { const Quantum *magick_restrict p, *magick_restrict q; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; break; } for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) { distortion[i]+=area*QuantumScale*(p[i]- image_statistics[channel].mean)*(GetPixelChannel( reconstruct_image,channel,q)- reconstruct_statistics[channel].mean); } else { distortion[i]+=area*QuantumScale*(Sa*p[i]- image_statistics[channel].mean)*(Da*GetPixelChannel( reconstruct_image,channel,q)- reconstruct_statistics[channel].mean); } } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SimilarityImageTag,progress,rows); if (proceed == MagickFalse) { status=MagickFalse; break; } } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); /* Divide by the standard deviation. 
*/ distortion[CompositePixelChannel]=0.0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma; PixelChannel channel = GetPixelChannelChannel(image,i); gamma=image_statistics[channel].standard_deviation* reconstruct_statistics[channel].standard_deviation; gamma=PerceptibleReciprocal(gamma); distortion[i]=QuantumRange*gamma*distortion[i]; distortion[CompositePixelChannel]+=distortion[i]*distortion[i]; } distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/ GetImageChannels(image)); /* Free resources. */ reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory( reconstruct_statistics); image_statistics=(ChannelStatistics *) RelinquishMagickMemory( image_statistics); return(status); } static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; MagickBooleanType status; size_t columns, rows; ssize_t y; status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; const Quantum *magick_restrict p, *magick_restrict q; ssize_t j, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) memset(channel_distortion,0,sizeof(channel_distortion)); for (x=0; x < (ssize_t) columns; x++) { double Da, Sa; ssize_t i; if ((GetPixelReadMask(image,p) <= 
(QuantumRange/2)) || (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2))) { p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); continue; } Sa=QuantumScale*GetPixelAlpha(image,p); Da=QuantumScale*GetPixelAlpha(reconstruct_image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; if (channel == AlphaPixelChannel) distance=QuantumScale*fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,channel,q))); else distance=QuantumScale*fabs((double) (Sa*p[i]-Da* GetPixelChannel(reconstruct_image,channel,q))); if (distance > channel_distortion[i]) channel_distortion[i]=distance; if (distance > channel_distortion[CompositePixelChannel]) channel_distortion[CompositePixelChannel]=distance; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPeakAbsoluteError) #endif for (j=0; j <= MaxPixelChannels; j++) if (channel_distortion[j] > distortion[j]) distortion[j]=channel_distortion[j]; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(status); } static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; ssize_t i; status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception); for (i=0; i <= MaxPixelChannels; i++) if (fabs(distortion[i]) < MagickEpsilon) 
distortion[i]=INFINITY; else distortion[i]=10.0*MagickLog10(1.0)-10.0*MagickLog10(distortion[i]); return(status); } static MagickBooleanType GetPerceptualHashDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { ChannelPerceptualHash *channel_phash, *reconstruct_phash; const char *artifact; MagickBooleanType normalize; ssize_t channel; /* Compute perceptual hash in the sRGB colorspace. */ channel_phash=GetImagePerceptualHash(image,exception); if (channel_phash == (ChannelPerceptualHash *) NULL) return(MagickFalse); reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception); if (reconstruct_phash == (ChannelPerceptualHash *) NULL) { channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory( channel_phash); return(MagickFalse); } artifact=GetImageArtifact(image,"phash:normalize"); normalize=(artifact == (const char *) NULL) || (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (channel=0; channel < MaxPixelChannels; channel++) { double difference; ssize_t i; difference=0.0; for (i=0; i < MaximumNumberOfImageMoments; i++) { double alpha, beta; ssize_t j; for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++) { alpha=channel_phash[channel].phash[j][i]; beta=reconstruct_phash[channel].phash[j][i]; if (normalize == MagickFalse) difference+=(beta-alpha)*(beta-alpha); else difference=sqrt((beta-alpha)*(beta-alpha)/ channel_phash[0].number_channels); } } distortion[channel]+=difference; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetPerceptualHashDistortion) #endif distortion[CompositePixelChannel]+=difference; } /* Free resources. 
*/ reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory( reconstruct_phash); channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash); return(MagickTrue); } static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; ssize_t i; status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception); for (i=0; i <= MaxPixelChannels; i++) distortion[i]=sqrt(distortion[i]); return(status); } static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { #define SSIMRadius 5.0 #define SSIMSigma 1.5 #define SSIMBlocksize 8 #define SSIMK1 0.01 #define SSIMK2 0.03 #define SSIML 1.0 CacheView *image_view, *reconstruct_view; char geometry[MagickPathExtent]; const char *artifact; double c1, c2, radius, sigma; KernelInfo *kernel_info; MagickBooleanType status; ssize_t i; size_t columns, rows; ssize_t y; /* Compute structural similarity index @ https://en.wikipedia.org/wiki/Structural_similarity. 
*/ radius=SSIMRadius; artifact=GetImageArtifact(image,"compare:ssim-radius"); if (artifact != (const char *) NULL) radius=StringToDouble(artifact,(char **) NULL); sigma=SSIMSigma; artifact=GetImageArtifact(image,"compare:ssim-sigma"); if (artifact != (const char *) NULL) sigma=StringToDouble(artifact,(char **) NULL); (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g", radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); c1=pow(SSIMK1*SSIML,2.0); artifact=GetImageArtifact(image,"compare:ssim-k1"); if (artifact != (const char *) NULL) c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0); c2=pow(SSIMK2*SSIML,2.0); artifact=GetImageArtifact(image,"compare:ssim-k2"); if (artifact != (const char *) NULL) c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0); status=MagickTrue; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,reconstruct_image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { double channel_distortion[MaxPixelChannels+1]; const Quantum *magick_restrict p, *magick_restrict q; ssize_t i, x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y- ((ssize_t) kernel_info->height/2L),columns+kernel_info->width, kernel_info->height,exception); q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/ 2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width, kernel_info->height,exception); if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL)) { status=MagickFalse; continue; } (void) 
memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        x_pixel_mu[MaxPixelChannels+1],
        x_pixel_sigma_squared[MaxPixelChannels+1],
        xy_sigma[MaxPixelChannels+1],
        y_pixel_mu[MaxPixelChannels+1],
        y_pixel_sigma_squared[MaxPixelChannels+1];

      const Quantum
        *magick_restrict reference,
        *magick_restrict target;

      MagickRealType
        *k;

      ssize_t
        v;

      /*
        Zero the per-window accumulators.  Bug fix: the original code also
        contained a fourth memset that re-zeroed x_pixel_sigma_squared using
        sizeof(y_pixel_sigma_squared); it was redundant (both arrays have the
        same size and y_pixel_sigma_squared is zeroed below) and has been
        removed.
      */
      (void) memset(x_pixel_mu,0,sizeof(x_pixel_mu));
      (void) memset(x_pixel_sigma_squared,0,sizeof(x_pixel_sigma_squared));
      (void) memset(xy_sigma,0,sizeof(xy_sigma));
      (void) memset(y_pixel_mu,0,sizeof(y_pixel_mu));
      (void) memset(y_pixel_sigma_squared,0,sizeof(y_pixel_sigma_squared));
      k=kernel_info->values;
      reference=p;
      target=q;
      /*
        Accumulate kernel-weighted means, second moments and the cross term
        over the Gaussian window centered on the current pixel.
      */
      for (v=0; v < (ssize_t) kernel_info->height; v++)
      {
        ssize_t
          u;

        for (u=0; u < (ssize_t) kernel_info->width; u++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              x_pixel,
              y_pixel;

            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait reconstruct_traits = GetPixelChannelTraits(
              reconstruct_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (reconstruct_traits == UndefinedPixelTrait) ||
                ((reconstruct_traits & UpdatePixelTrait) == 0))
              continue;
            x_pixel=QuantumScale*reference[i];
            x_pixel_mu[i]+=(*k)*x_pixel;
            x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel;
            y_pixel=QuantumScale*
              GetPixelChannel(reconstruct_image,channel,target);
            y_pixel_mu[i]+=(*k)*y_pixel;
            y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel;
            xy_sigma[i]+=(*k)*x_pixel*y_pixel;
          }
          k++;
          reference+=GetPixelChannels(image);
          target+=GetPixelChannels(reconstruct_image);
        }
        reference+=GetPixelChannels(image)*columns;
        target+=GetPixelChannels(reconstruct_image)*columns;
      }
      /*
        Combine the window statistics into the per-channel SSIM score.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          ssim,
          x_pixel_mu_squared,
          x_pixel_sigmas_squared,
          xy_mu,
          xy_sigmas,
          y_pixel_mu_squared,
          y_pixel_sigmas_squared;

        PixelChannel channel =
GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits( reconstruct_image,channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i]; y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i]; xy_mu=x_pixel_mu[i]*y_pixel_mu[i]; xy_sigmas=xy_sigma[i]-xy_mu; x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared; y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared; ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/ ((x_pixel_mu_squared+y_pixel_mu_squared+c1)* (x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2)); channel_distortion[i]+=ssim; channel_distortion[CompositePixelChannel]+=ssim; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion) #endif for (i=0; i <= MaxPixelChannels; i++) distortion[i]+=channel_distortion[i]; } image_view=DestroyCacheView(image_view); reconstruct_view=DestroyCacheView(reconstruct_view); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0)) continue; distortion[i]/=((double) columns*rows); } distortion[CompositePixelChannel]/=((double) columns*rows); distortion[CompositePixelChannel]/=(double) GetImageChannels(image); kernel_info=DestroyKernelInfo(kernel_info); return(status); } static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image, const Image *reconstruct_image,double *distortion,ExceptionInfo *exception) { MagickBooleanType status; ssize_t i; status=GetStructuralSimilarityDistortion(image,reconstruct_image, distortion,exception); for (i=0; i <= MaxPixelChannels; 
i++) distortion[i]=(1.0-(distortion[i]))/2.0; return(status); } MagickExport MagickBooleanType GetImageDistortion(Image *image, const Image *reconstruct_image,const MetricType metric,double *distortion, ExceptionInfo *exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); assert(distortion != (double *) NULL); *distortion=0.0; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Get image distortion. */ length=MaxPixelChannels+1UL; channel_distortion=(double *) AcquireQuantumMemory(length, sizeof(*channel_distortion)); if (channel_distortion == (double *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(channel_distortion,0,length* sizeof(*channel_distortion)); switch (metric) { case AbsoluteErrorMetric: { status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion, exception); break; } case FuzzErrorMetric: { status=GetFuzzDistortion(image,reconstruct_image,channel_distortion, exception); break; } case MeanAbsoluteErrorMetric: { status=GetMeanAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case MeanErrorPerPixelErrorMetric: { status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion, exception); break; } case MeanSquaredErrorMetric: { status=GetMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case NormalizedCrossCorrelationErrorMetric: default: { status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakAbsoluteErrorMetric: { status=GetPeakAbsoluteDistortion(image,reconstruct_image, 
channel_distortion,exception); break; } case PeakSignalToNoiseRatioErrorMetric: { status=GetPeakSignalToNoiseRatio(image,reconstruct_image, channel_distortion,exception); break; } case PerceptualHashErrorMetric: { status=GetPerceptualHashDistortion(image,reconstruct_image, channel_distortion,exception); break; } case RootMeanSquaredErrorMetric: { status=GetRootMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralSimilarityErrorMetric: { status=GetStructuralSimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralDissimilarityErrorMetric: { status=GetStructuralDisimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } } *distortion=channel_distortion[CompositePixelChannel]; channel_distortion=(double *) RelinquishMagickMemory(channel_distortion); (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(), *distortion); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D i s t o r t i o n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDistortions() compares the pixel channels of an image to a % reconstructed image and returns the specified distortion metric for each % channel. % % The format of the GetImageDistortions method is: % % double *GetImageDistortions(const Image *image, % const Image *reconstruct_image,const MetricType metric, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o metric: the metric. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport double *GetImageDistortions(Image *image, const Image *reconstruct_image,const MetricType metric, ExceptionInfo *exception) { double *channel_distortion; MagickBooleanType status; size_t length; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Get image distortion. */ length=MaxPixelChannels+1UL; channel_distortion=(double *) AcquireQuantumMemory(length, sizeof(*channel_distortion)); if (channel_distortion == (double *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(channel_distortion,0,length* sizeof(*channel_distortion)); status=MagickTrue; switch (metric) { case AbsoluteErrorMetric: { status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion, exception); break; } case FuzzErrorMetric: { status=GetFuzzDistortion(image,reconstruct_image,channel_distortion, exception); break; } case MeanAbsoluteErrorMetric: { status=GetMeanAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case MeanErrorPerPixelErrorMetric: { status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion, exception); break; } case MeanSquaredErrorMetric: { status=GetMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case NormalizedCrossCorrelationErrorMetric: default: { status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakAbsoluteErrorMetric: { status=GetPeakAbsoluteDistortion(image,reconstruct_image, channel_distortion,exception); break; } case PeakSignalToNoiseRatioErrorMetric: { 
status=GetPeakSignalToNoiseRatio(image,reconstruct_image, channel_distortion,exception); break; } case PerceptualHashErrorMetric: { status=GetRootMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case RootMeanSquaredErrorMetric: { status=GetRootMeanSquaredDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralSimilarityErrorMetric: { status=GetStructuralSimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } case StructuralDissimilarityErrorMetric: { status=GetStructuralDisimilarityDistortion(image,reconstruct_image, channel_distortion,exception); break; } } if (status == MagickFalse) { channel_distortion=(double *) RelinquishMagickMemory(channel_distortion); return((double *) NULL); } return(channel_distortion); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e s E q u a l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImagesEqual() compare the pixels of two images and returns immediately % if any pixel is not identical. % % The format of the IsImagesEqual method is: % % MagickBooleanType IsImagesEqual(const Image *image, % const Image *reconstruct_image,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType IsImagesEqual(const Image *image, const Image *reconstruct_image,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { const Quantum *magick_restrict p, *magick_restrict q; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image, channel,q))); if (distance >= MagickEpsilon) break; } if (i < (ssize_t) GetPixelChannels(image)) break; p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } if (x < (ssize_t) columns) break; } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); return(y < (ssize_t) rows ? 
MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r M e t r i c % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorMetric() measures the difference between colors at each pixel % location of two images. A value other than 0 means the colors match % exactly. Otherwise an error measure is computed by summing over all % pixels in an image the distance squared in RGB space between each image % pixel and its corresponding pixel in the reconstruct image. The error % measure is assigned to these image members: % % o mean_error_per_pixel: The mean error for any single pixel in % the image. % % o normalized_mean_error: The normalized mean quantization error for % any single pixel in the image. This distance measure is normalized to % a range between 0 and 1. It is independent of the range of red, green, % and blue values in the image. % % o normalized_maximum_error: The normalized maximum quantization % error for any single pixel in the image. This distance measure is % normalized to a range between 0 and 1. It is independent of the range % of red, green, and blue values in your image. % % A small normalized mean square error, accessed as % image->normalized_mean_error, suggests the images are very similar in % spatial layout and color. % % The format of the SetImageColorMetric method is: % % MagickBooleanType SetImageColorMetric(Image *image, % const Image *reconstruct_image,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o reconstruct_image: the reconstruct image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType SetImageColorMetric(Image *image, const Image *reconstruct_image,ExceptionInfo *exception) { CacheView *image_view, *reconstruct_view; double area, maximum_error, mean_error, mean_error_per_pixel; MagickBooleanType status; size_t columns, rows; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(reconstruct_image != (const Image *) NULL); assert(reconstruct_image->signature == MagickCoreSignature); area=0.0; maximum_error=0.0; mean_error_per_pixel=0.0; mean_error=0.0; rows=MagickMax(image->rows,reconstruct_image->rows); columns=MagickMax(image->columns,reconstruct_image->columns); image_view=AcquireVirtualCacheView(image,exception); reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception); for (y=0; y < (ssize_t) rows; y++) { const Quantum *magick_restrict p, *magick_restrict q; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception); q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double distance; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image, channel); if ((traits == UndefinedPixelTrait) || (reconstruct_traits == UndefinedPixelTrait) || ((reconstruct_traits & UpdatePixelTrait) == 0)) continue; distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image, channel,q))); if (distance >= MagickEpsilon) { mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; } area++; } p+=GetPixelChannels(image); q+=GetPixelChannels(reconstruct_image); } } reconstruct_view=DestroyCacheView(reconstruct_view); image_view=DestroyCacheView(image_view); 
image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area); image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale* mean_error/area); image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error); status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S i m i l a r i t y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SimilarityImage() compares the reference image of the image and returns the % best match offset. In addition, it returns a similarity image such that an % exact match location is completely white and if none of the pixels match, % black, otherwise some gray level in-between. % % The format of the SimilarityImageImage method is: % % Image *SimilarityImage(const Image *image,const Image *reference, % const MetricType metric,const double similarity_threshold, % RectangleInfo *offset,double *similarity,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o reference: find an area of the image that closely resembles this image. % % o metric: the metric. % % o similarity_threshold: minimum distortion for (sub)image match. % % o offset: the best match offset of the reference image within the image. % % o similarity: the computed similarity between the images. % % o exception: return any errors or warnings in this structure. 
% */ static double GetSimilarityMetric(const Image *image,const Image *reference, const MetricType metric,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { double distortion; Image *similarity_image; MagickBooleanType status; RectangleInfo geometry; SetGeometry(reference,&geometry); geometry.x=x_offset; geometry.y=y_offset; similarity_image=CropImage(image,&geometry,exception); if (similarity_image == (Image *) NULL) return(0.0); distortion=0.0; status=GetImageDistortion(similarity_image,reference,metric,&distortion, exception); similarity_image=DestroyImage(similarity_image); if (status == MagickFalse) return(0.0); return(distortion); } MagickExport Image *SimilarityImage(const Image *image,const Image *reference, const MetricType metric,const double similarity_threshold, RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception) { #define SimilarityImageTag "Similarity/Image" CacheView *similarity_view; Image *similarity_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(offset != (RectangleInfo *) NULL); SetGeometry(reference,offset); *similarity_metric=MagickMaximumValue; similarity_image=CloneImage(image,image->columns-reference->columns+1, image->rows-reference->rows+1,MagickTrue,exception); if (similarity_image == (Image *) NULL) return((Image *) NULL); status=SetImageStorageClass(similarity_image,DirectClass,exception); if (status == MagickFalse) { similarity_image=DestroyImage(similarity_image); return((Image *) NULL); } (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel, exception); /* Measure similarity of reference image against image. 
*/ status=MagickTrue; progress=0; similarity_view=AcquireAuthenticCacheView(similarity_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ shared(progress,status,similarity_metric) \ magick_number_threads(image,image,image->rows-reference->rows+1,1) #endif for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++) { double similarity; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp flush(similarity_metric) #endif if (*similarity_metric <= similarity_threshold) continue; q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns, 1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++) { ssize_t i; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp flush(similarity_metric) #endif if (*similarity_metric <= similarity_threshold) break; similarity=GetSimilarityMetric(image,reference,metric,x,y,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SimilarityImage) #endif if ((metric == NormalizedCrossCorrelationErrorMetric) || (metric == UndefinedErrorMetric)) similarity=1.0-similarity; if (similarity < *similarity_metric) { offset->x=x; offset->y=y; *similarity_metric=similarity; } if (metric == PerceptualHashErrorMetric) similarity=MagickMin(0.01*similarity,1.0); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image, channel); if ((traits == UndefinedPixelTrait) || (similarity_traits == UndefinedPixelTrait) || ((similarity_traits & UpdatePixelTrait) == 0)) continue; SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange- QuantumRange*similarity),q); } q+=GetPixelChannels(similarity_image); } if 
(SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SimilarityImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } similarity_view=DestroyCacheView(similarity_view); if (status == MagickFalse) similarity_image=DestroyImage(similarity_image); return(similarity_image); }
/* ==== Tanh.c (Torch THNN generic module) ==== */
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/Tanh.c"
#else

/* Forward pass: output = tanh(input), element-wise.
 * THTensor_(tanh) resizes `output` to match `input` internally. */
void THNN_(Tanh_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output)
{
  THTensor_(tanh)(output, input);
}

/* Backward pass: gradInput = gradOutput * (1 - output^2),
 * using the identity d/dx tanh(x) = 1 - tanh(x)^2, so only the
 * forward `output` (not `input`) is needed. */
void THNN_(Tanh_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THTensor *output)
{
  /* gradOutput must have the same shape as output. */
  THNN_CHECK_SHAPE(output, gradOutput);
  THTensor_(resizeAs)(gradInput, output);

  /* Slow path: 1-D or any non-contiguous tensor goes through the
   * generic strided-apply macro (no OpenMP). */
  if (output->nDimension == 1 ||
      !THTensor_(isContiguous)(output) ||
      !THTensor_(isContiguous)(gradOutput) ||
      !THTensor_(isContiguous)(gradInput))
  {
    TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, output,
      real z = *output_data;                \
      *gradInput_data = *gradOutput_data * (1. - z*z);
    );
  }
  else
  {
    /* Fast path: all tensors contiguous, so iterate over the raw
     * data pointers with an OpenMP-parallel flat loop. */
    real* ptr_gradOutput = THTensor_(data)(gradOutput);
    real* ptr_gradInput  = THTensor_(data)(gradInput);
    real* ptr_output     = THTensor_(data)(output);
    long i;

#pragma omp parallel for private(i)
    for (i = 0; i < THTensor_(nElement)(gradInput); i++)
    {
      real z = ptr_output[i];
      ptr_gradInput[i] = ptr_gradOutput[i] * (1. - z*z);
    }
  }
}
#endif
main.c
// C Compiler flag: -fopenmp
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>

#define N 20

/* Prints one row of `n` ints, space-separated, followed by a newline. */
static void print_row(const int *v, int n) {
    for (int k = 0; k < n; k++)
        printf("%d ", v[k]);
    printf("\n");
}

/* Small OpenMP demo: two arrays are filled in a statically scheduled
 * parallel loop, then summed element-wise in a dynamically scheduled
 * one; each iteration reports which thread executed it. */
int main(int argc, char *argv[]) {
    printf("Threads count: %d\n", omp_get_max_threads());

    int len = 12;
    int a[len];
    int b[len];
    int c[len];

    /* Static schedule: chunks of 4 iterations across 3 threads. */
#pragma omp parallel for schedule(static, 4) num_threads(3)
    for (int i = 0; i < len; i++) {
        a[i] = i * 10;
        b[i] = i * 20;
        printf("working %d of %d threads\n", omp_get_thread_num(), omp_get_num_threads());
    }

    print_row(a, len);
    print_row(b, len);

    /* Dynamic schedule: chunks of 2 iterations handed out on demand to 4 threads. */
#pragma omp parallel for schedule(dynamic, 2) num_threads(4)
    for (int i = 0; i < len; i++) {
        c[i] = a[i] + b[i];
        printf("working %d of %d threads\n", omp_get_thread_num(), omp_get_num_threads());
    }

    print_row(c, len);

    return 0;
}
pairwise_distance_computation.h
// // Created by Benjamin Tenmann on 29/01/2022. // #ifndef SETRIQ_PAIRWISE_DISTANCE_COMPUTATION_H #define SETRIQ_PAIRWISE_DISTANCE_COMPUTATION_H #include "utils/type_defs.h" template<typename T> double_vector_t pairwise_distance_computation(T metric, const string_vector_t& input_strings) { const auto& n = input_strings.size(); auto&& distance_matrix = double_vector_t (n * (n - 1) / 2); #pragma omp parallel for default(none) firstprivate(n, metric) shared(input_strings, distance_matrix) for (size_t i = 0; i < (n - 1); i++) { for (size_t j = (i + 1); j < n; j++) { const auto& idx = (n * (n - 1)) / 2 - (n - i) * ((n - i) - 1) / 2 + j - i - 1; distance_matrix[idx] = metric.forward(input_strings[i], input_strings[j]); } } return distance_matrix; } #endif //SETRIQ_PAIRWISE_DISTANCE_COMPUTATION_H
acoustics.h
/** * * @file acoustics.h * * @copyright 2018 King Abdullah University of Science and Technology (KAUST). * All rights reserved. * * @author Mustafa Abduljabbar [mustafa.abduljabbar@kaust.edu.sa] and Mohammed Al Farhan [mohammed.farhan@kaust.edu.sa]. * **/ #ifndef ACOUSTICS #define ACOUSTICS #include "base_mpi.h" #include "fmm/args.h" #include "bound_box.h" #include "build_tree.h" #include "logger.h" #include "partition.h" #include "traversal.h" #include "tree_mpi.h" #include "up_down_pass.h" #include <fstream> namespace exafmm { struct fmm_data { public: double kreal; double kimag; int ncrit; int nthreads; int listbased; int tspawn; std::string partitioning; }; vec3 cycles; Bodies buffer; Bounds globalBounds; Bodies bbodies; Bodies vbodies; Bodies fbbodies; Bodies fvbodies; Cells bcells, fbcells; Cells vcells, fvcells; int ntriangles; BaseMPI * baseMPI; BoundBox * boundBox; BuildTree * localTree, * fineLocalTree, * globalTree; Args* args; Partition * partition; Traversal * traversal; TreeMPI * treeMPI; UpDownPass * upDownPass; UpDownPass * upDownPass2; std::vector<std::vector<double> > nearGauss; std::vector<int> patches; bool init; bool init_low; void log_initialize() { args->verbose &= baseMPI->mpirank == 0; logger::verbose = args->verbose; logger::printTitle("FMM Parameters"); args->print(logger::stringLength, P); logger::printTitle("FMM Profiling"); logger::resetTimer(); logger::startTimer("Total FMM"); logger::startPAPI(); } void log_finalize() { logger::stopPAPI(); logger::stopTimer("Total FMM"); logger::printTitle("Total runtime"); logger::printTime("Total FMM"); } #if USE_PART /** * initializes FMM * @param eps2 is the FMM epsilon * @param fmmAttributes FMM performance control attributes * @param nb is the intitial number of bodies * @param xb a vector of X coordinates of bodies * @param yb a vector of Y coordinates of bodies * @param zb a vector of Z coordinates of bodies * @param xt a vector of X coordinates of triangular points * @param yt a vector of Y 
coordinates of triangular points * @param zt a vector of Z coordinates of triangular points * @param patchids a vector of Mesh node ids corresponding to each particle (to avoid self interaction among nodes) * @param nearGaussPoints Guass quadrature elements for near-field treatments (when enabled) * @param nhdgqp count of Gauss refinements for near-field treatments (when enabled) * @param ntriangles_ number of triangular patches * @param nipp_ number of integration points per patch * @param nearpd near patch distance threshold (to apply p-refinement) * @param ws integration points * @param ipolator_near basis vector */ void FMM_Init(double eps2, fmm_data fmmAttributes, int nb, std::vector<double>& xb, std::vector<double>& yb, std::vector<double>& zb, std::vector<double>& xt, std::vector<double>& yt, std::vector<double>& zt, std::vector<int>& patchids, std::vector<std::vector<double> > nearGaussPoints, int nhdgqp, int ntriangles_, int nipp_, double nearpd, std::vector<double> ws, std::vector<std::vector<double> > ipolator_near) { int nspawn = fmmAttributes.tspawn; const int images = 0; const double theta = 0.4; const bool useRmax = false; const bool useRopt = false; const bool verbose = false; init = true; init_low = true; num_threads(fmmAttributes.nthreads); kernel::eps2 = eps2; kernel::wavek = complex_t(fmmAttributes.kreal, fmmAttributes.kimag); kernel::nhdgqp = nhdgqp; kernel::nipp = nipp_; ntriangles = ntriangles_; #if EXAFMM_NEARF_TREATMENT kernel::nearpd = nearpd; nearGauss = nearGaussPoints; #if EXAFMM_SINGLE kernel::ws.resize(ws.size()); std::copy(ws.begin(), ws.end(), kernel::ws.begin()); kernel::ipolator_near = ipolator_near; #else kernel::ws = ws; kernel::ipolator_near = ipolator_near; #endif #endif kernel::setup(); args = new Args; baseMPI = new BaseMPI; boundBox = new BoundBox(nspawn); localTree = new BuildTree(fmmAttributes.ncrit, nspawn); fineLocalTree = new BuildTree(64, 1024); globalTree = new BuildTree(1, nspawn); partition = new 
Partition(baseMPI->mpirank, baseMPI->mpisize); traversal = new Traversal(nspawn, images); treeMPI = new TreeMPI(baseMPI->mpirank, baseMPI->mpisize, images); upDownPass = new UpDownPass(theta, useRmax, useRopt); upDownPass2 = new UpDownPass(1, useRmax, useRopt); args->ncrit = fmmAttributes.ncrit; args->write = 1; args->threads = fmmAttributes.nthreads; args->distribution = "external"; args->dual = (fmmAttributes.listbased == 0); args->graft = 0; args->images = images; args->mutual = 0; args->numBodies = 0; args->useRopt = useRopt; args->nspawn = nspawn; args->theta = theta; args->partitioning = fmmAttributes.partitioning.c_str(); //args->verbose = verbose;// & (baseMPI->mpirank == 0); args->verbose = verbose & (baseMPI->mpirank == 0); //args->verbose = 1; args->useRmax = useRmax; logger::verbose = args->verbose; if(nb <= 0) return; assert((nb % kernel::nipp) == 0); vbodies.resize(nb); bbodies.resize(nb); patches.resize(nb); int offset = (ntriangles/baseMPI->mpisize)*baseMPI->mpirank; #pragma omp parallel for for(int i = 0; i < nb; ++i){ B_iter B = bbodies.begin() + i; int point_index = i; B->X[0] = xb[point_index]; B->X[1] = yb[point_index]; B->X[2] = zb[point_index]; B->TRI_POINT[0] = xt[point_index]; B->TRI_POINT[1] = yt[point_index]; B->TRI_POINT[2] = zt[point_index]; int patch = patchids[point_index]; patches[i]=patch; assert(patch < ntriangles); B->PATCH = patch; B->POINT_LOC = point_index%kernel::nipp; int relative_patch = patch - offset; #if EXAFMM_NEARF_TREATMENT for (int j = 0; j < nhdgqp; ++j) { B->GAUSS_NEAR[j][0] = nearGaussPoints[relative_patch*nhdgqp+j][0]; B->GAUSS_NEAR[j][1] = nearGaussPoints[relative_patch*nhdgqp+j][1]; B->GAUSS_NEAR[j][2] = nearGaussPoints[relative_patch*nhdgqp+j][2]; } #endif B->WEIGHT = 1; } #pragma omp parallel for for(int i = 0; i < nb; ++i){ B_iter B = vbodies.begin() + i; int point_index = i; B->X[0] = xb[point_index]; B->X[1] = yb[point_index]; B->X[2] = zb[point_index]; B->TRI_POINT[0] = xt[point_index]; B->TRI_POINT[1] = 
yt[point_index]; B->TRI_POINT[2] = zt[point_index]; int patch = patchids[point_index]; B->PATCH = patch; B->POINT_LOC = point_index%kernel::nipp; int relative_patch = patch - offset; #if EXAFMM_NEARF_TREATMENT for (int j = 0; j < nhdgqp; ++j) { B->GAUSS_NEAR[j][0] = nearGaussPoints[relative_patch*nhdgqp+j][0]; B->GAUSS_NEAR[j][1] = nearGaussPoints[relative_patch*nhdgqp+j][1]; B->GAUSS_NEAR[j][2] = nearGaussPoints[relative_patch*nhdgqp+j][2]; } #endif B->WEIGHT = 1; } log_initialize(); } /** * partitions FMM * @param nb is the intitial number of bodies (input/output) * @param xb a vector of X coordinates of bodies (input/output) * @param yb a vector of Y coordinates of bodies (input/output) * @param zb a vector of Z coordinates of bodies (input/output) * @param xt a vector of X coordinates of triangular nodes (input/output) * @param yt a vector of Y coordinates of triangular nodes (input/output) * @param zt a vector of Z coordinates of triangular nodes (input/output) * @param patch a vector of Mesh node ids corresponding to each particle to avoid self interaction among nodes (input/output) * @param loc the location of eatch particle with the triangular mesh (input/output) */ void FMM_Partition(int& nb, std::vector<double>&xb, std::vector<double>&yb, std::vector<double>&zb, std::vector<double>&xt, std::vector<double>&yt, std::vector<double>&zt, std::vector<int>& patch, std::vector<short>& loc) { logger::printTitle("Partition Profiling"); Bounds localBounds; if(nb > 0) { localBounds = boundBox->getBounds(bbodies); localBounds = boundBox->getBounds(vbodies, localBounds); } else { localBounds.Xmin = 0; localBounds.Xmax = 0; } globalBounds = baseMPI->allreduceBounds(localBounds); cycles = globalBounds.Xmax - globalBounds.Xmin; if(baseMPI->mpisize == 1) { for (B_iter B=bbodies.begin(); B!=bbodies.end(); B++) { int i = B-bbodies.begin(); B->IBODY = i; } for (B_iter B=vbodies.begin(); B!=vbodies.end(); B++) { int i = B-vbodies.begin(); B->IBODY = i; } nb = bbodies.size(); 
return; } int nPatches = bbodies.size()/kernel::nipp; assert(bbodies.size()%kernel::nipp == 0); Bodies partitionBodies(nPatches); for (B_iter B=bbodies.begin(), BP=partitionBodies.begin(); B!=bbodies.end(); B+=kernel::nipp, BP++) { BP->X[0] = B->X[0]; BP->X[1] = B->X[1]; BP->X[2] = B->X[2]; BP->PATCH = B->PATCH; BP->WEIGHT = 1; BP->IBODY = B - bbodies.begin(); } partition->partition(partitionBodies, globalBounds, args->partitioning); Bodies tempBodies(bbodies); int currPatch = 0; B_iter bbodiesBegin = bbodies.begin(); B_iter vbodiesBegin = vbodies.begin(); for (B_iter B=partitionBodies.begin(); B!=partitionBodies.end(); B++) { B_iter TB = tempBodies.begin() + B->IBODY; B_iter BB = bbodiesBegin+currPatch*kernel::nipp; B_iter VB = vbodiesBegin+currPatch*kernel::nipp; for (int i = 0; i < kernel::nipp; ++i) { assert(B->IRANK >= 0 && B->IRANK < baseMPI->mpisize); (TB+i)->IRANK = B->IRANK; *(BB+i) = *(TB+i); *(VB+i) = *(TB+i); } currPatch++; } assert(bbodiesBegin+currPatch*kernel::nipp == bbodies.end()); nb = bbodies.size(); int global; MPI_Allreduce(&nb, &global, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); if(global != (ntriangles * kernel::nipp)) std::cout << global << " : " << ntriangles*kernel::nipp << std::endl; assert(global == (ntriangles * kernel::nipp)); bbodies = treeMPI->commBodies(bbodies); vbodies = treeMPI->commBodies(vbodies); nb = bbodies.size(); MPI_Allreduce(&nb, &global, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); if(global != (ntriangles * kernel::nipp)) std::cout << global << " : " << ntriangles*kernel::nipp << std::endl; assert(global == (ntriangles * kernel::nipp)); patch.resize(nb); loc.resize(nb); xb.resize(nb); yb.resize(nb); zb.resize(nb); xt.resize(nb); yt.resize(nb); zt.resize(nb); for (B_iter B=bbodies.begin(); B!=bbodies.end(); B++) { int i = B-bbodies.begin(); patch[i] = B->PATCH; assert(patch[i] < ntriangles); loc[i] = B->POINT_LOC; xb[i] = B->X[0]; yb[i] = B->X[1]; zb[i] = B->X[2]; xt[i] = B->TRI_POINT[0]; yt[i] = B->TRI_POINT[1]; zt[i] = 
B->TRI_POINT[2]; B->IBODY = i; } for (B_iter B=vbodies.begin(); B!=vbodies.end(); B++) { int i = B-vbodies.begin(); B->IBODY = i; } } #else /** * initializes FMM * @param eps2 is the FMM epsilon * @param fmmAttributes FMM performance control attributes * @param nb is the intitial number of bodies * @param xb a vector of X coordinates of bodies * @param yb a vector of Y coordinates of bodies * @param zb a vector of Z coordinates of bodies * @param patchids a vector of Mesh node ids corresponding to each particle (to avoid self interaction among nodes) * @param nearGaussPoints Guass quadrature elements for near-field treatments (when enabled) * @param nhdgqp count of Gauss refinements for near-field treatments (when enabled) * @param ntriangles_ number of triangular patches * @param nipp_ number of integration points per patch * @param nearpd near patch distance threshold (to apply p-refinement) * @param ws integration points * @param ipolator_near basis vector */ void FMM_Init(double eps2, fmm_data fmmAttributes, int nb, std::vector<double>& xb, std::vector<double>& yb, std::vector<double>& zb, std::vector<int>& patchids, std::vector<std::vector<double> > nearGaussPoints, int nhdgqp, int ntriangles_, int nipp_, double nearpd, std::vector<double> ws, std::vector<std::vector<double> > ipolator_near) { int nspawn = fmmAttributes.tspawn; const int images = 0; const double theta = 0.4; const bool useRmax = false; const bool useRopt = false; const bool verbose = false; init = true; init_low = true; num_threads(fmmAttributes.nthreads); kernel::eps2 = eps2; kernel::wavek = complex_t(fmmAttributes.kreal, fmmAttributes.kimag); kernel::nhdgqp = nhdgqp; kernel::nipp = nipp_; ntriangles = ntriangles_; #if EXAFMM_NEARF_TREATMENT kernel::nearpd = nearpd; nearGauss = nearGaussPoints; #if EXAFMM_SINGLE kernel::ws.resize(ws.size()); std::copy(ws.begin(), ws.end(), kernel::ws.begin()); kernel::ipolator_near = ipolator_near; #else kernel::ws = ws; kernel::ipolator_near = ipolator_near; 
#endif #endif kernel::setup(); args = new Args; baseMPI = new BaseMPI; boundBox = new BoundBox(nspawn); localTree = new BuildTree(fmmAttributes.ncrit, nspawn); fineLocalTree = new BuildTree(64, 1024); globalTree = new BuildTree(1, nspawn); partition = new Partition(baseMPI->mpirank, baseMPI->mpisize); traversal = new Traversal(nspawn, images); treeMPI = new TreeMPI(baseMPI->mpirank, baseMPI->mpisize, images); upDownPass = new UpDownPass(theta, useRmax, useRopt); upDownPass2 = new UpDownPass(1, useRmax, useRopt); args->ncrit = fmmAttributes.ncrit; args->write = 1; args->threads = fmmAttributes.nthreads; args->distribution = "external"; args->dual = (fmmAttributes.listbased == 0); args->graft = 0; args->images = images; args->mutual = 0; args->numBodies = 0; args->useRopt = useRopt; args->nspawn = nspawn; args->theta = theta; args->partitioning = fmmAttributes.partitioning.c_str(); //args->verbose = verbose;// & (baseMPI->mpirank == 0); args->verbose = verbose & (baseMPI->mpirank == 0); //args->verbose = 1; args->useRmax = useRmax; logger::verbose = args->verbose; if(nb <= 0) return; assert((nb % kernel::nipp) == 0); vbodies.resize(nb); bbodies.resize(nb); patches.resize(nb); #pragma omp parallel for for(int i = 0; i < nb; ++i){ B_iter B = bbodies.begin() + i; int point_index = i; B->X[0] = xb[point_index]; B->X[1] = yb[point_index]; B->X[2] = zb[point_index]; int patch = patchids[point_index]; patches[i]=patch; B->PATCH = patch; B->POINT_LOC = point_index%kernel::nipp; #if EXAFMM_NEARF_TREATMENT for (int j = 0; j < nhdgqp; ++j) { B->GAUSS_NEAR[j][0] = nearGaussPoints[patch*nhdgqp+j][0]; B->GAUSS_NEAR[j][1] = nearGaussPoints[patch*nhdgqp+j][1]; B->GAUSS_NEAR[j][2] = nearGaussPoints[patch*nhdgqp+j][2]; } #endif B->WEIGHT = 1; } #pragma omp parallel for for(int i = 0; i < nb; ++i){ B_iter B = vbodies.begin() + i; int point_index = i; B->X[0] = xb[point_index]; B->X[1] = yb[point_index]; B->X[2] = zb[point_index]; int patch = patchids[point_index]; B->PATCH = patch; 
B->POINT_LOC = point_index%kernel::nipp; #if EXAFMM_NEARF_TREATMENT for (int j = 0; j < nhdgqp; ++j) { B->GAUSS_NEAR[j][0] = nearGaussPoints[patch*nhdgqp+j][0]; B->GAUSS_NEAR[j][1] = nearGaussPoints[patch*nhdgqp+j][1]; B->GAUSS_NEAR[j][2] = nearGaussPoints[patch*nhdgqp+j][2]; } #endif B->WEIGHT = 1; } log_initialize(); } /** * partitions FMM * @param nb is the intitial number of bodies (input/outpu) * @param xb a vector of X coordinates of bodies (input/outpu) * @param yb a vector of Y coordinates of bodies (input/outpu) * @param zb a vector of Z coordinates of bodies (input/outpu) * @param patch a vector of Mesh node ids corresponding to each particle to avoid self interaction among nodes (input/output) * @param loc the location of eatch particle with the triangular mesh (input/output) */ void FMM_Partition(int& nb, std::vector<double>&xb, std::vector<double>&yb, std::vector<double>&zb, std::vector<int>& patch, std::vector<short>& loc) { logger::printTitle("Partition Profiling"); Bounds localBounds; if(nb > 0) { localBounds = boundBox->getBounds(bbodies); localBounds = boundBox->getBounds(vbodies, localBounds); } else { localBounds.Xmin = 0; localBounds.Xmax = 0; } globalBounds = baseMPI->allreduceBounds(localBounds); cycles = globalBounds.Xmax - globalBounds.Xmin; if(baseMPI->mpisize == 1) { for (B_iter B=bbodies.begin(); B!=bbodies.end(); B++) { int i = B-bbodies.begin(); B->IBODY = i; } for (B_iter B=vbodies.begin(); B!=vbodies.end(); B++) { int i = B-vbodies.begin(); B->IBODY = i; } nb = bbodies.size(); return; } int nPatches = bbodies.size()/kernel::nipp; Bodies partitionBodies(nPatches); for (B_iter B=bbodies.begin(), BP=partitionBodies.begin(); B!=bbodies.end(); B+=kernel::nipp, BP++) { BP->X[0] = B->X[0]; BP->X[1] = B->X[1]; BP->X[2] = B->X[2]; BP->PATCH = B->PATCH; BP->WEIGHT = 1; BP->IBODY = B - bbodies.begin(); } partition->partition(partitionBodies, globalBounds,args->partitioning); Bodies tempBodies(bbodies); int currPatch = 0; B_iter bbodiesBegin = 
bbodies.begin(); B_iter vbodiesBegin = vbodies.begin(); for (B_iter B=partitionBodies.begin(); B!=partitionBodies.end(); B++) { B_iter TB = tempBodies.begin() + B->IBODY; B_iter BB = bbodiesBegin+currPatch*kernel::nipp; B_iter VB = vbodiesBegin+currPatch*kernel::nipp; for (int i = 0; i < kernel::nipp; ++i) { (TB+i)->IRANK = B->IRANK; *(BB+i) = *(TB+i); *(VB+i) = *(TB+i); } currPatch++; } bbodies = treeMPI->commBodies(bbodies); vbodies = treeMPI->commBodies(vbodies); nb = bbodies.size(); patch.resize(nb); loc.resize(nb); xb.resize(nb); yb.resize(nb); zb.resize(nb); for (B_iter B=bbodies.begin(); B!=bbodies.end(); B++) { int i = B-bbodies.begin(); patch[i] = B->PATCH; loc[i] = B->POINT_LOC; xb[i] = B->X[0]; yb[i] = B->X[1]; zb[i] = B->X[2]; B->IBODY = i; } for (B_iter B=vbodies.begin(); B!=vbodies.end(); B++) { int i = B-vbodies.begin(); B->IBODY = i; } } #endif /** * Builds FMM Tree */ void FMM_BuildTree() { Bounds localBoundsB = boundBox->getBounds(bbodies); bcells = localTree->buildTree(bbodies, buffer, localBoundsB); Bounds localBoundsV = boundBox->getBounds(vbodies); vcells = localTree->buildTree(vbodies, buffer, localBoundsV); } /** * Applies FMM * @param vi target values (output) * @param vb source values (input) * @param wb source weights (input) * @param verbose output verbosity (input) */ void FMM_MatVec(std::vector<std::complex<double> >& vi, std::vector<std::complex<double> >const& vb, std::vector<std::complex<double> >const& wb, bool verbose) { args->verbose = verbose; log_initialize(); for (B_iter B=bbodies.begin(); B!=bbodies.end(); B++) { B->SRC = 1.0; B->QWEIGHT = 1.0; B->TRG = 0; B->ICELL = 0; } for (B_iter B=vbodies.begin(); B!=vbodies.end(); B++) { B->SRC = vb[B->IBODY]; B->QWEIGHT = wb[B->IBODY]; B->TRG = 0; B->ICELL = 0; } if(init) { FMM_BuildTree(); upDownPass->upwardPass(bcells); upDownPass->upwardPass(vcells); init = 0; } else { upDownPass2->upwardPass(bcells, false); upDownPass2->upwardPass(vcells, false); } treeMPI->setLET(vcells, cycles); 
#pragma omp parallel sections { #pragma omp section { #if EXAFMM_USE_DISTGRAPH treeMPI->initDistGraph(globalBounds); treeMPI->commDistGraph(cycles); #else treeMPI->commBodies(); treeMPI->commCells(); #endif } #pragma omp section { traversal->initListCount(bcells); traversal->initWeight(bcells); traversal->traverse(bcells, vcells, cycles, args->dual, args->mutual); } } if (baseMPI->mpisize > 1) { if (args->graft) { treeMPI->linkLET(); Bodies gbodies = treeMPI->root2body(); Cells jcells = globalTree->buildTree(gbodies, buffer, globalBounds); treeMPI->attachRoot(jcells); traversal->traverse(bcells, jcells, cycles, args->dual, false); } else { for (int irank=0; irank<baseMPI->mpisize; irank++) { Cells jcells; treeMPI->getLET(jcells, (baseMPI->mpirank+irank)%baseMPI->mpisize); traversal->traverse(bcells, jcells, cycles, args->dual, false); } } } upDownPass->downwardPass(bcells); if(verbose) { localTree->printTreeData(bcells); traversal->printTraversalData(); traversal->writeTraversalData(baseMPI->mpirank, bbodies.size()); logger::writeTime(baseMPI->mpirank); } log_finalize(); for (B_iter B=bbodies.begin(); B!=bbodies.end(); B++) { assert(B->IBODY < bbodies.size() && B->IBODY >= 0); vi[B->IBODY] = B->TRG[0]; } } /** * Finalizes FMM */ void FMM_Finalize() { delete args; delete baseMPI; delete boundBox; delete localTree; delete fineLocalTree; delete globalTree; delete partition; delete traversal; delete treeMPI; delete upDownPass; } /** * calculates result using N-Body direct fomr the first nb bodies * @param nb number of bodies * @param vi target values * @param addresses locations of sample bodies */ void Direct(int nb, std::complex<double>* vi, std::vector<int>& addresses) { Bodies ibodies(bbodies.begin(), bbodies.begin() + nb); Bodies jbodies(vbodies.begin(), vbodies.end()); for (B_iter B=ibodies.begin(); B!=ibodies.end(); B++) { B->TRG = 0; B->ICELL = 0; } for (B_iter B=jbodies.begin(); B!=jbodies.end(); B++) { B->TRG = 0; B->ICELL = 0; } for (int irank=0; 
irank<baseMPI->mpisize; irank++) { if (args->verbose) std::cout << "Direct loop : " << irank+1 << "/" << baseMPI->mpisize << std::endl; treeMPI->shiftBodies(jbodies); traversal->direct(ibodies, jbodies, cycles); } for (B_iter B=ibodies.begin(); B!=ibodies.end(); B++) { int i = B-ibodies.begin(); addresses[i] = B->IBODY; vi[B->IBODY] = B->TRG[0]; } } /** * calculates result using N-Body direct fomr the sample nb bodies * @param nb number of sample bodies * @param vi target values * @param addresses locations of sample bodies */ void DirectSample(int nb, std::vector<std::complex<double> >& vi, std::vector<int>& addresses) { Bodies ibodies(nb); Bodies jbodies(vbodies.begin(), vbodies.end()); srand(time(NULL)); for (B_iter B=ibodies.begin(); B!=ibodies.end(); B++) { *B = bbodies[rand()%bbodies.size()]; B->TRG = 0; B->ICELL = 0; } for (B_iter B=jbodies.begin(); B!=jbodies.end(); B++) { B->TRG = 0; B->ICELL = 0; } for (int irank=0; irank<baseMPI->mpisize; irank++) { if (args->verbose) std::cout << "Direct loop : " << irank+1 << "/" << baseMPI->mpisize << std::endl; treeMPI->shiftBodies(jbodies); traversal->direct(ibodies, jbodies, cycles); } for (B_iter B=ibodies.begin(); B!=ibodies.end(); B++) { int i = B-ibodies.begin(); addresses[i] = B->IBODY; vi[B->IBODY] = B->TRG[0]; } } } #endif
lib-1.c
#include <stdlib.h>
#include <omp.h>

/* Smoke test of the OpenMP runtime library API: locks, nestable locks,
 * dynamic/nested adjustment flags, thread-count queries, and the wall
 * clock.  Any deviation from the specified behavior aborts. */
int main (void)
{
  double d, e;
  int l;
  omp_lock_t lck;
  omp_nest_lock_t nlck;

  d = omp_get_wtime ();

  /* Plain lock: omp_test_lock must fail while held and succeed (acquiring
   * the lock) when free.  The exact acquire/release order matters here. */
  omp_init_lock (&lck);
  omp_set_lock (&lck);
  if (omp_test_lock (&lck))
    abort ();
  omp_unset_lock (&lck);
  if (! omp_test_lock (&lck))
    abort ();
  if (omp_test_lock (&lck))
    abort ();
  omp_unset_lock (&lck);
  omp_destroy_lock (&lck);

  /* Nestable lock: omp_test_nest_lock returns the new nesting depth on
   * success, so successive acquires by the same thread yield 1, 3, 2. */
  omp_init_nest_lock (&nlck);
  if (omp_test_nest_lock (&nlck) != 1)
    abort ();
  omp_set_nest_lock (&nlck);
  if (omp_test_nest_lock (&nlck) != 3)
    abort ();
  omp_unset_nest_lock (&nlck);
  omp_unset_nest_lock (&nlck);
  if (omp_test_nest_lock (&nlck) != 2)
    abort ();
  omp_unset_nest_lock (&nlck);
  omp_unset_nest_lock (&nlck);
  omp_destroy_nest_lock (&nlck);

  /* dyn-var must reflect the last omp_set_dynamic call. */
  omp_set_dynamic (1);
  if (! omp_get_dynamic ())
    abort ();
  omp_set_dynamic (0);
  if (omp_get_dynamic ())
    abort ();

  /* nest-var must reflect the last omp_set_nested call. */
  omp_set_nested (1);
  if (! omp_get_nested ())
    abort ();
  omp_set_nested (0);
  if (omp_get_nested ())
    abort ();

  /* Outside a parallel region the team size is 1, while
   * omp_get_max_threads reports the requested value. */
  omp_set_num_threads (5);
  if (omp_get_num_threads () != 1)
    abort ();
  if (omp_get_max_threads () != 5)
    abort ();
  if (omp_get_thread_num () != 0)
    abort ();
  omp_set_num_threads (3);
  if (omp_get_num_threads () != 1)
    abort ();
  if (omp_get_max_threads () != 3)
    abort ();
  if (omp_get_thread_num () != 0)
    abort ();

  /* Inside a parallel region (dynamic adjustment off, 3 requested):
   * team size is 3, thread ids are 0..2, and the master has id 0.
   * Failures are OR-reduced into l. */
  l = 0;
#pragma omp parallel reduction (|:l)
  {
    l = omp_get_num_threads () != 3;
    l |= omp_get_thread_num () < 0;
    l |= omp_get_thread_num () >= 3;
#pragma omp master
    l |= omp_get_thread_num () != 0;
  }
  if (l)
    abort ();

  if (omp_get_num_procs () <= 0)
    abort ();
  /* omp_in_parallel: false outside, true inside any active parallel
   * region — including one forced active by an if(1) clause. */
  if (omp_in_parallel ())
    abort ();
#pragma omp parallel reduction (|:l)
  l = ! omp_in_parallel ();
#pragma omp parallel reduction (|:l) if (1)
  l = ! omp_in_parallel ();

  /* The wall clock must be monotonic across this test... */
  e = omp_get_wtime ();
  if (d > e)
    abort ();
  d = omp_get_wtick ();
  /* Negative precision is definitely wrong, bigger than 1s clock
     resolution is also strange.  */
  if (d <= 0 || d > 1)
    abort ();
  return 0;
}
GB_binop__rminus_fc64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):  GB_AaddB__rminus_fc64
// A.*B function (eWiseMult): GB_AemultB__rminus_fc64
// A*D function (colscale):  GB_AxD__rminus_fc64
// D*A function (rowscale):  GB_DxB__rminus_fc64
// C+=B function (dense accum): GB_Cdense_accumB__rminus_fc64
// C+=b function (dense accum): GB_Cdense_accumb__rminus_fc64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rminus_fc64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rminus_fc64
// C=scalar+B  GB_bind1st__rminus_fc64
// C=scalar+B' GB_bind1st_tran__rminus_fc64
// C=A+scalar  GB_bind2nd__rminus_fc64
// C=A'+scalar GB_bind2nd_tran__rminus_fc64

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// B,b type: GxB_FC64_t

// NOTE: "rminus" is reverse-minus: the operand order is swapped, so
// cij = bij - aij (GB_FC64_minus computes its first argument minus its second).
// BinaryOp: cij = GB_FC64_minus (bij, aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_BTYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    GxB_FC64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator (note the reversed operand order: z = y - x)
#define GB_BINOP(z, x, y, i, j) \
    z = GB_FC64_minus (y, x) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RMINUS || GxB_NO_FC64 || GxB_NO_RMINUS_FC64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

// (void return: never disabled for those ops, so no GB_DISABLE guard here)
void GB_Cdense_ewise3_accum__rminus_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__rminus_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__rminus_fc64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__rminus_fc64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC64_t
        GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above already returned;
    // harmless artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__rminus_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__rminus_fc64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Frees the three ek_slice workspaces allocated by the add/emult templates.
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__rminus_fc64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__rminus_fc64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__rminus_fc64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
    GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (GBB is true if Bb is NULL)
        if (!GBB (Bb, p)) continue ;
        GxB_FC64_t bij = Bx [p] ;
        // rminus with x bound first: cij = bij - x
        Cx [p] = GB_FC64_minus (bij, x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__rminus_fc64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (GBB is true if Ab is NULL)
        if (!GBB (Ab, p)) continue ;
        GxB_FC64_t aij = Ax [p] ;
        // rminus with y bound second: cij = y - aij
        Cx [p] = GB_FC64_minus (y, aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC64_t aij = Ax [pA] ;                  \
    Cx [pC] = GB_FC64_minus (aij, x) ;          \
}

GrB_Info GB_bind1st_tran__rminus_fc64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE to the A type for the rest of this file
    #undef  GB_ATYPE
    #define GB_ATYPE \
    GxB_FC64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC64_t aij = Ax [pA] ;                  \
    Cx [pC] = GB_FC64_minus (y, aij) ;          \
}

GrB_Info GB_bind2nd_tran__rminus_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
rt_dsyrk.c
#include "runtime.h"

// Runtime glue for DSYRK (symmetric rank-k update C = alpha*op(A)*op(A)' +
// beta*C). One OmpSs task wrapper pair (_n for NoTrans, _t for Trans) is
// compiled per backend configuration; the #pragma omp task annotations
// declare the data each task reads ([lda*k]A or [n*lda]A) and updates
// ([ldc*n]C) so the Nanos++ runtime can track dependences.

#ifdef PLASMA_WITH_SMP
// CPU-only build: tasks run CORE_dsyrk on the host.
#pragma omp target device (smp) copy_deps
#pragma omp task in([lda*k]A) inout([ldc*n]C) label(dsyrk_n_smp)
void CORE_dsyrk_ompss_n(PLASMA_enum uplo, PLASMA_enum trans, int n, int k, double alpha, double *A, int lda, double beta, double *C, int ldc, int nb)
{
    CORE_dsyrk(uplo, trans, n, k, alpha, A, lda, beta, C, ldc);
}

#pragma omp target device (smp) copy_deps
#pragma omp task in([n*lda]A) inout([ldc*n]C) label(dsyrk_t_smp)
void CORE_dsyrk_ompss_t(PLASMA_enum uplo, PLASMA_enum trans, int n, int k, double alpha, double *A, int lda, double beta, double *C, int ldc, int nb)
{
    CORE_dsyrk(uplo, trans, n, k, alpha, A, lda, beta, C, ldc);
}
#endif

#ifdef PLASMA_WITH_CUDA_HYBRID
// Hybrid build: an SMP implementation plus a CUDA alternative registered via
// implements(...); the runtime may schedule either one.
#pragma omp target device (smp) copy_deps
#pragma omp task in([lda*k]A) inout([ldc*n]C) label(dsyrk_n_hyb_smp)
void CORE_dsyrk_ompss_n(PLASMA_enum uplo, PLASMA_enum trans, int n, int k, double alpha, double *A, int lda, double beta, double *C, int ldc, int nb)
{
    CORE_dsyrk(uplo, trans, n, k, alpha, A, lda, beta, C, ldc);
}

#pragma omp target device (smp) copy_deps
#pragma omp task in([n*lda]A) inout([ldc*n]C) label(dsyrk_t_hyb_smp)
void CORE_dsyrk_ompss_t(PLASMA_enum uplo, PLASMA_enum trans, int n, int k, double alpha, double *A, int lda, double beta, double *C, int ldc, int nb)
{
    CORE_dsyrk(uplo, trans, n, k, alpha, A, lda, beta, C, ldc);
}

//Alternative implementations
#pragma omp target device (cuda) copy_deps implements(CORE_dsyrk_ompss_n)
#pragma omp task in([lda*k]A) inout([ldc*n]C) label(dsyrk_n_hyb_cuda)
void CORE_dsyrk_cuda_n(PLASMA_enum uplo, PLASMA_enum trans, int n, int k, double alpha, double *A, int lda, double beta, double *C, int ldc, int nb)
{
    // Translate PLASMA enums to their cuBLAS equivalents.
    cublasFillMode_t cuplo;
    cublasOperation_t cutrans;
    if ( uplo == PlasmaUpper ) cuplo = CUBLAS_FILL_MODE_UPPER;
    else cuplo = CUBLAS_FILL_MODE_LOWER;
    if ( trans == PlasmaNoTrans) cutrans = CUBLAS_OP_N;
    else cutrans = CUBLAS_OP_T;
    // Bind cuBLAS to the stream Nanos++ assigned to this task so the call is
    // ordered with the task's other device work.
    cublasHandle_t handle = nanos_get_cublas_handle();
    cudaStream_t stream = nanos_get_kernel_execution_stream();
    cublasSetStream(handle, stream);
    cublasDsyrk(handle, cuplo, cutrans, n, k, &alpha, A, lda, &beta, C, ldc);
}

#pragma omp target device (cuda) copy_deps implements(CORE_dsyrk_ompss_t)
#pragma omp task in([n*lda]A) inout([ldc*n]C) label(dsyrk_t_hyb_cuda)
void CORE_dsyrk_cuda_t(PLASMA_enum uplo, PLASMA_enum trans, int n, int k, double alpha, double *A, int lda, double beta, double *C, int ldc, int nb)
{
    cublasFillMode_t cuplo;
    cublasOperation_t cutrans;
    if ( uplo == PlasmaUpper ) cuplo = CUBLAS_FILL_MODE_UPPER;
    else cuplo = CUBLAS_FILL_MODE_LOWER;
    if ( trans == PlasmaNoTrans) cutrans = CUBLAS_OP_N;
    else cutrans = CUBLAS_OP_T;
    cublasHandle_t handle = nanos_get_cublas_handle();
    cudaStream_t stream = nanos_get_kernel_execution_stream();
    cublasSetStream(handle, stream);
    cublasDsyrk(handle, cuplo, cutrans, n, k, &alpha, A, lda, &beta, C, ldc);
}
#endif

#ifdef PLASMA_WITH_CUDA_PURE
// GPU-only build: the task wrappers themselves are CUDA tasks.
#pragma omp target device (cuda) copy_deps
#pragma omp task in([lda*k]A) inout([ldc*n]C) label(dsyrk_n_cuda)
void CORE_dsyrk_ompss_n(PLASMA_enum uplo, PLASMA_enum trans, int n, int k, double alpha, double *A, int lda, double beta, double *C, int ldc, int nb)
{
    cublasFillMode_t cuplo;
    cublasOperation_t cutrans;
    if ( uplo == PlasmaUpper ) cuplo = CUBLAS_FILL_MODE_UPPER;
    else cuplo = CUBLAS_FILL_MODE_LOWER;
    if ( trans == PlasmaNoTrans) cutrans = CUBLAS_OP_N;
    else cutrans = CUBLAS_OP_T;
    cublasHandle_t handle = nanos_get_cublas_handle();
    cudaStream_t stream = nanos_get_kernel_execution_stream();
    cublasSetStream(handle, stream);
    cublasDsyrk(handle, cuplo, cutrans, n, k, &alpha, A, lda, &beta, C, ldc);
}

#pragma omp target device (cuda) copy_deps
#pragma omp task in([n*lda]A) inout([ldc*n]C) label(dsyrk_t_cuda)
void CORE_dsyrk_ompss_t(PLASMA_enum uplo, PLASMA_enum trans, int n, int k, double alpha, double *A, int lda, double beta, double *C, int ldc, int nb)
{
    cublasFillMode_t cuplo;
    cublasOperation_t cutrans;
    if ( uplo == PlasmaUpper ) cuplo = CUBLAS_FILL_MODE_UPPER;
    else cuplo = CUBLAS_FILL_MODE_LOWER;
    if ( trans == PlasmaNoTrans) cutrans = CUBLAS_OP_N;
    else cutrans = CUBLAS_OP_T;
    cublasHandle_t handle = nanos_get_cublas_handle();
    cudaStream_t stream = nanos_get_kernel_execution_stream();
    cublasSetStream(handle, stream);
    cublasDsyrk(handle, cuplo, cutrans, n, k, &alpha, A, lda, &beta, C, ldc);
}
#endif

// Dispatch a DSYRK task to the active runtime: QUARK takes the quark/flags
// pair; OmpSs picks the NoTrans/Trans wrapper (task submission happens at the
// call via the pragmas above). Note: if plasma->runtime is neither value,
// the call is silently a no-op — presumably unreachable; verify upstream.
void RT_CORE_dsyrk(Quark *quark, Quark_Task_Flags *task_flags,
                      PLASMA_enum uplo, PLASMA_enum trans,
                      int n, int k, int nb,
                      double alpha, double *A, int lda,
                      double beta, double *C, int ldc)
{
    plasma_context_t *plasma;
    plasma = plasma_context_self();
    if (plasma->runtime == PLASMA_QUARK) {
        QUARK_CORE_dsyrk(
            quark, task_flags,
            uplo, trans,
            n, k, nb,
            alpha, A, lda,
            beta, C, ldc);
    } else if (plasma->runtime == PLASMA_OMPSS) {
        if ( trans == PlasmaNoTrans ) {
            CORE_dsyrk_ompss_n(uplo, trans, n, k, alpha, A, lda, beta, C, ldc, nb);
        } else {
            CORE_dsyrk_ompss_t(uplo, trans, n, k, alpha, A, lda, beta, C, ldc, nb);
        }
    }
}
GB_unaryop__ainv_uint64_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__ainv_uint64_fp64
// op(A') function: GB_tran__ainv_uint64_fp64

// C type: uint64_t
// A type: double
// cast: uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64)
// unaryop: cij = -aij

// Summary: C(i,j) = (uint64_t) (-A(i,j)), i.e. additive inverse of a double,
// cast to uint64 with GraphBLAS's saturating/wrapping unsigned-cast macro.

#define GB_ATYPE \
    double

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, aij) \
    uint64_t z ; GB_CAST_UNSIGNED(z,aij,64) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Dense elementwise apply: Cx [p] = (uint64_t) (-Ax [p]) for p in [0,anz).
// Cx and Ax may be aliased (the operation is in-place safe, one read then one
// write per p).
GrB_Info GB_unop__ainv_uint64_fp64
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Fused transpose + apply; the bucket-transpose loop is supplied by the
// included template (phase 2 fills C using the Rowcounts from phase 1).
GrB_Info GB_tran__ainv_uint64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GiRaFFE_boundary_conditions.h
// Currently, we're using basic Cartesian boundary conditions, pending fixes by Zach. // Part P8a: Declare boundary condition FACE_UPDATE macro, // which updates a single face of the 3D grid cube // using quadratic polynomial extrapolation. // Basic extrapolation boundary conditions #define FACE_UPDATE(which_gf, i0min,i0max, i1min,i1max, i2min,i2max, FACEX0,FACEX1,FACEX2) \ for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) { \ gfs[IDX4(which_gf,i0,i1,i2)] = \ +2.0*gfs[IDX4(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \ -1.0*gfs[IDX4(which_gf,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)]; \ } // +1.0*gfs[IDX4(which_gf,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \ // Basic Copy boundary conditions #define FACE_UPDATE_COPY(which_gf, i0min,i0max, i1min,i1max, i2min,i2max, FACEX0,FACEX1,FACEX2) \ for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) { \ gfs[IDX4(which_gf,i0,i1,i2)] = gfs[IDX4(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)]; \ } // Part P8b: Boundary condition driver routine: Apply BCs to all six // boundary faces of the cube, filling in the innermost // ghost zone first, and moving outward. const int MAXFACE = -1; const int NUL = +0; const int MINFACE = +1;// This macro acts differently in that it acts on an entire 3-vector of gfs, instead of 1. // which_gf_0 corresponds to the zeroth component of that vector. The if statements only // evaluate true if the velocity is directed inwards on the face in consideration. 
// Extrapolation update for a single auxiliary gridfunction on one face slab;
// despite the name, the outflow (inward-velocity zeroing) logic is currently
// disabled — see the commented block below.
#define FACE_UPDATE_OUTFLOW(which_gf, i0min,i0max, i1min,i1max, i2min,i2max, FACEX0,FACEX1,FACEX2) \
  for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) { \
        aux_gfs[IDX4(which_gf,i0,i1,i2)] =                                  \
          +2.0*aux_gfs[IDX4(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)]  \
          -1.0*aux_gfs[IDX4(which_gf,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)]; \
      }
/* aux_gfs[IDX4(which_gf_0+1,i0,i1,i2)] =                                      \
          +3.0*aux_gfs[IDX4(which_gf_0+1,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \
          -3.0*aux_gfs[IDX4(which_gf_0+1,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)] \
          +1.0*aux_gfs[IDX4(which_gf_0+1,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \
        aux_gfs[IDX4(which_gf_0+2,i0,i1,i2)] =                                 \
          +3.0*aux_gfs[IDX4(which_gf_0+2,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \
          -3.0*aux_gfs[IDX4(which_gf_0+2,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)] \
          +1.0*aux_gfs[IDX4(which_gf_0+2,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \
        if(FACEX0*aux_gfs[IDX4(which_gf_0+0,i0,i1,i2)] > 0.0) { \
          aux_gfs[IDX4(which_gf_0+0,i0,i1,i2)] = 0.0;           \
        }                                                       \
        if(FACEX1*aux_gfs[IDX4(which_gf_0+1,i0,i1,i2)] > 0.0) { \
          aux_gfs[IDX4(which_gf_0+1,i0,i1,i2)] = 0.0;           \
        }                                                       \
        if(FACEX2*aux_gfs[IDX4(which_gf_0+2,i0,i1,i2)] > 0.0) { \
          aux_gfs[IDX4(which_gf_0+2,i0,i1,i2)] = 0.0;           \
        }                                                       \
*/

// Apply boundary conditions to all six faces of the cube, ghost zone by
// ghost zone, innermost first: evolved gridfunctions (except StildeD, which
// is skipped here) get linear extrapolation; the Valencia 3-velocity
// components in aux_gfs get the FACE_UPDATE_OUTFLOW treatment.
// After each face update, imin[]/imax[] are widened by one so the next face
// update of this ghost level (and the next level) includes the just-filled
// points.
void apply_bcs(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *gfs,REAL *aux_gfs) {
  // First, we apply extrapolation boundary conditions to AD
#pragma omp parallel for
  for(int which_gf=0;which_gf<NUM_EVOL_GFS;which_gf++) {
    // Skip the StildeD components; all other evolved gfs are extrapolated.
    if(which_gf < STILDED0GF || which_gf > STILDED2GF) {
      int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
      int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };
      for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
        // After updating each face, adjust imin[] and imax[]
        //   to reflect the newly-updated face extents.
        FACE_UPDATE(which_gf, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
        FACE_UPDATE(which_gf, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;

        FACE_UPDATE(which_gf, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
        FACE_UPDATE(which_gf, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;

        FACE_UPDATE(which_gf, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); imin[2]--;
        FACE_UPDATE(which_gf, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); imax[2]++;
      }
    }
  }
  // Apply outflow/extrapolation boundary conditions to ValenciavU by passing VALENCIAVU0 as which_gf_0
  // NOTE(review): this loop is intentionally(?) not OpenMP-parallelized,
  // unlike the one above — confirm whether that is deliberate.
  for(int which_gf=VALENCIAVU0GF;which_gf<=VALENCIAVU2GF;which_gf++) {
    int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
    int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };
    for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
      FACE_UPDATE_OUTFLOW(which_gf, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
      FACE_UPDATE_OUTFLOW(which_gf, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;

      FACE_UPDATE_OUTFLOW(which_gf, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
      FACE_UPDATE_OUTFLOW(which_gf, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;

      FACE_UPDATE_OUTFLOW(which_gf, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); imin[2]--;
      FACE_UPDATE_OUTFLOW(which_gf, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); imax[2]++;
    }
  }
  // Then, we apply copy boundary conditions to StildeD and psi6Phi
  /*#pragma omp parallel for
  for(int which_gf=3;which_gf<NUM_EVOL_GFS;which_gf++) {
    int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
    int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };
    for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
      // After updating each face, adjust imin[] and imax[]
      //   to reflect the newly-updated face extents.
      FACE_UPDATE_COPY(which_gf, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
      FACE_UPDATE_COPY(which_gf, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;

      FACE_UPDATE_COPY(which_gf, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
      FACE_UPDATE_COPY(which_gf, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;

      FACE_UPDATE_COPY(which_gf, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); imin[2]--;
      FACE_UPDATE_COPY(which_gf, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); imax[2]++;
    }
  }*/
}

// A supplement to the boundary conditions for debugging. This will overwrite data with exact conditions
// Fills a face slab with the exact (analytic) 1D-test initial data evaluated
// at the advected coordinate x0 - n*dt; which analytic piece is used depends
// on x0 relative to lbound/rbound (defined elsewhere — TODO confirm their
// values). Also zeroes PSI6PHI on the slab. The included headers read
// xx0/xx1/xx2 and write out_gfs/aux_gfs at (i0,i1,i2).
void FACE_UPDATE_EXACT(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *xx[3],
                       const int n, const REAL dt,REAL *out_gfs,REAL *aux_gfs,
                       const int i0min,const int i0max, const int i1min,const int i1max, const int i2min,const int i2max,
                       const int FACEX0,const int FACEX1,const int FACEX2) {
  for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
    // Advect the x coordinate backward by n timesteps for the exact solution.
    REAL xx0 = xx[0][i0]-n*dt;
    REAL xx1 = xx[1][i1];
    REAL xx2 = xx[2][i2];
    if(xx0<=lbound) {
#include "GiRaFFEfood_A_v_1D_tests_left.h"
    } else if (xx0<rbound) {
#include "GiRaFFEfood_A_v_1D_tests_center.h"
    } else {
#include "GiRaFFEfood_A_v_1D_tests_right.h"
    }
    out_gfs[IDX4(PSI6PHIGF, i0,i1,i2)] = 0.0;
  }
}

// Debugging driver: overwrite all six ghost-zone faces with the exact data
// above, innermost ghost level first (the comment below notes only the
// x faces are of interest, but all six are currently updated).
void apply_bcs_EXACT(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *xx[3],
                     const int n, const REAL dt,
                     REAL *out_gfs,REAL *aux_gfs) {
  int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
  int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };
  for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
    // After updating each face, adjust imin[] and imax[]
    //   to reflect the newly-updated face extents.
    // Right now, we only want to update the xmin and xmax faces with the exact data.
    FACE_UPDATE_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,out_gfs,aux_gfs,imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
    FACE_UPDATE_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,out_gfs,aux_gfs,imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;

    FACE_UPDATE_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,out_gfs,aux_gfs,imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
    FACE_UPDATE_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,out_gfs,aux_gfs,imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;

    FACE_UPDATE_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,out_gfs,aux_gfs,imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); imin[2]--;
    FACE_UPDATE_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,out_gfs,aux_gfs,imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); imax[2]++;
  }
}

// A supplement to the boundary conditions for debugging. This will overwrite data with exact conditions
// NOTE(review): the entire body is commented out, so this function is a
// deliberate no-op placeholder at present.
void FACE_UPDATE_EXACT_StildeD(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *xx[3],
                               REAL *out_gfs,REAL *out_gfs_exact,
                               const int i0min,const int i0max, const int i1min,const int i1max, const int i2min,const int i2max,
                               const int FACEX0,const int FACEX1,const int FACEX2) {
  // This is currently modified to calculate more exact boundary conditions for StildeD. Rename if it works.
  /*for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
#include "GiRaFFEfood_HO_Stilde.h"
  }*/
  /*idx = IDX3(i0,i1,i2);
  out_gfs[IDX4pt(STILDED0GF,idx)] = out_gfs_exact[IDX4pt(STILDED0GF,idx)];
  out_gfs[IDX4pt(STILDED1GF,idx)] = out_gfs_exact[IDX4pt(STILDED1GF,idx)];
  out_gfs[IDX4pt(STILDED2GF,idx)] = out_gfs_exact[IDX4pt(STILDED2GF,idx)];*/
}

// Debugging driver for StildeD: only the xmin/xmax faces are active (the
// y/z face updates are commented out); currently a no-op overall since
// FACE_UPDATE_EXACT_StildeD is empty.
void apply_bcs_EXACT_StildeD(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *xx[3],
                             REAL *out_gfs,REAL *out_gfs_exact) {
  int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
  int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };
  for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
    // After updating each face, adjust imin[] and imax[]
    //   to reflect the newly-updated face extents.
    // Right now, we only want to update the xmin and xmax faces with the exact data.
    FACE_UPDATE_EXACT_StildeD(Nxx,Nxx_plus_2NGHOSTS,xx,out_gfs,out_gfs_exact,imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
    FACE_UPDATE_EXACT_StildeD(Nxx,Nxx_plus_2NGHOSTS,xx,out_gfs,out_gfs_exact,imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;

    //FACE_UPDATE_EXACT_StildeD(Nxx,Nxx_plus_2NGHOSTS,xx,out_gfs,out_gfs_exact,imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
    //FACE_UPDATE_EXACT_StildeD(Nxx,Nxx_plus_2NGHOSTS,xx,out_gfs,out_gfs_exact,imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;

    //FACE_UPDATE_EXACT_StildeD(Nxx,Nxx_plus_2NGHOSTS,xx,out_gfs,out_gfs_exact,imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); imin[2]--;
    //FACE_UPDATE_EXACT_StildeD(Nxx,Nxx_plus_2NGHOSTS,xx,out_gfs,out_gfs_exact,imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); imax[2]++;
  }
}
convolution_3x3.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 3x3 convolution, stride 1, scalar reference path ("_sse" by naming
// convention only — no intrinsics here). Accumulates over all input channels
// into each output channel, processing two output rows per pass so each trio
// of input rows is reused for both.
// Assumes top_blob is pre-sized to outw x outh x outch and that
// bottom_blob provides at least outh+2 rows of outw+2 valid columns
// (valid convolution, no padding applied here).
static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        // Seed the whole output channel with its bias (0 when bias is null),
        // then accumulate each input channel's contribution with +=.
        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;
            float* outptr2 = outptr + outw;     // second output row of the pair

            const float* img0 = bottom_blob.channel(q);

            // 3x3 kernel for (output p, input q), stored row-major as k0/k1/k2.
            const float* kernel0 = kernel + p*inch*9 + q*9;

            // Four consecutive input rows: r0..r2 feed output row i,
            // r1..r3 feed output row i+1.
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            int i = 0;

            // Main loop: two output rows at a time.
            for (; i+1 < outh; i+=2)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = 0;
                    float sum2 = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    // Row i+1 reuses r1/r2 and adds r3.
                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];

                    *outptr += sum;
                    *outptr2 += sum2;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr++;
                    outptr2++;
                }

                // Skip the 2-column right margin, then advance one extra input
                // row (we consumed two output rows).
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;

                // outptr/outptr2 already advanced by outw; skip past the row
                // the partner pointer just wrote.
                outptr += outw;
                outptr2 += outw;
            }

            // Tail: single remaining output row when outh is odd.
            for (; i < outh; i++)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    *outptr += sum;

                    r0++;
                    r1++;
                    r2++;
                    outptr++;
                }

                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
        }
    }
}
flux.c
#include <stddef.h> #include <string.h> #include <stdint.h> #include <omp.h> #include <math.h> #include "geometry.h" #include "bench.h" #include "phy.h" #include "core_kernel.h" #define MAG0 (0.5 / 3) #define MAG1 (-MAG0) static void _KRN_ComputeFlux( const size_t nfnodes, const uint32_t bsz, const uint32_t *nfptr, const double *f_xyz0, const double *f_xyz1, const double *f_xyz2, const uint32_t *ie, const uint32_t *part, const uint32_t *n0, const uint32_t *n1, const double *x0, const double *x1, const double *x2, const double *x3, const double *q, const size_t dofs, const size_t snfc, const uint32_t *snfic, const double *xyz0, const double *xyz1, const double *xyz2, const uint32_t *sn0, const uint32_t *sn1, const uint32_t *sn2, const double *w0termsx, const double *w0termsy, const double *w0termsz, const double *w1termsx, const double *w1termsy, const double *w1termsz, double *gradx0, double *gradx1, double *gradx2, double *r) { memset(gradx0, 0, dofs * sizeof(double)); memset(gradx1, 0, dofs * sizeof(double)); memset(gradx2, 0, dofs * sizeof(double)); memset(r, 0, dofs * sizeof(double)); /* Calculates the gradients at the nodes using weighted least squares This solves using Gram-Schmidt */ #pragma omp parallel { const uint32_t t = (unsigned int) omp_get_thread_num(); const uint32_t ie0 = ie[t]; const uint32_t ie1 = ie[t+1]; uint32_t i; for(i = ie0; i < ie1; i++) { const uint32_t node0 = n0[i]; const uint32_t node1 = n1[i]; const uint32_t idx0 = (unsigned int) bsz * node0; const uint32_t idx1 = (unsigned int) bsz * node1; double dq; double termx; double termy; double termz; if(part[node0] == t) { termx = w0termsx[i]; termy = w0termsy[i]; termz = w0termsz[i]; dq = q[idx1 + 0] - q[idx0 + 0]; gradx0[idx0 + 0] += termx * dq; gradx1[idx0 + 0] += termy * dq; gradx2[idx0 + 0] += termz * dq; dq = q[idx1 + 1] - q[idx0 + 1]; gradx0[idx0 + 1] += termx * dq; gradx1[idx0 + 1] += termy * dq; gradx2[idx0 + 1] += termz * dq; dq = q[idx1 + 2] - q[idx0 + 2]; gradx0[idx0 + 2] += 
termx * dq; gradx1[idx0 + 2] += termy * dq; gradx2[idx0 + 2] += termz * dq; dq = q[idx1 + 3] - q[idx0 + 3]; gradx0[idx0 + 3] += termx * dq; gradx1[idx0 + 3] += termy * dq; gradx2[idx0 + 3] += termz * dq; } if(part[node1] == t) { termx = w1termsx[i]; termy = w1termsy[i]; termz = w1termsz[i]; dq = q[idx0 + 0] - q[idx1 + 0]; gradx0[idx1 + 0] += termx * dq; gradx1[idx1 + 0] += termy * dq; gradx2[idx1 + 0] += termz * dq; dq = q[idx0 + 1] - q[idx1 + 1]; gradx0[idx1 + 1] += termx * dq; gradx1[idx1 + 1] += termy * dq; gradx2[idx1 + 1] += termz * dq; dq = q[idx0 + 2] - q[idx1 + 2]; gradx0[idx1 + 2] += termx * dq; gradx1[idx1 + 2] += termy * dq; gradx2[idx1 + 2] += termz * dq; dq = q[idx0 + 3] - q[idx1 + 3]; gradx0[idx1 + 3] += termx * dq; gradx1[idx1 + 3] += termy * dq; gradx2[idx1 + 3] += termz * dq; } } } /* Calculates the fluxes on the face and performs the flux balance */ #pragma omp parallel { uint32_t t = (unsigned int) omp_get_thread_num(); uint32_t ie0 = ie[t]; uint32_t ie1 = ie[t+1]; uint32_t i; for(i = ie0; i < ie1; i++) { uint32_t node0 = n0[i]; uint32_t node1 = n1[i]; double xn = x0[i]; double yn = x1[i]; double zn = x2[i]; double ln = x3[i]; double xmean = 0.5f * (xyz0[node0] + xyz0[node1]); double ymean = 0.5f * (xyz1[node0] + xyz1[node1]); double zmean = 0.5f * (xyz2[node0] + xyz2[node1]); /* Now lets get our other 2 vectors For first vector, use {1,0,0} and subtract off the component in the direction of the face normal. 
If the inner product of {1,0,0} is close to unity, use {0,1,0} */ double X1, Y1, Z1; double dot = xn; if(fabs(dot) < 0.95f) { X1 = 1.f - dot * xn; Y1 = -dot * yn; Z1 = -dot * zn; } else { dot = yn; X1 = -dot * xn; Y1 = 1.f - dot * yn; Z1 = -dot * zn; } /* Normalize the first vector */ double size = X1 * X1; size += Y1 * Y1; size += Z1 * Z1; size = sqrt(size); X1 /= size; Y1 /= size; Z1 /= size; /* Take cross-product of normal and V1 to get V2 */ double X2 = yn * Z1; X2 -= zn * Y1; double Y2 = zn * X1; Y2 -= xn * Z1; double Z2 = xn * Y1; Z2 -= yn * X1; /* Get variables on "left" and "right" side of face */ double rx = xmean - xyz0[node0]; double ry = ymean - xyz1[node0]; double rz = zmean - xyz2[node0]; uint32_t idx0 = (unsigned int) bsz * node0; uint32_t idx1 = (unsigned int) bsz * node1; // P double pL = q[idx0 + 0] + gradx0[idx0 + 0] * rx; pL += gradx1[idx0 + 0] * ry; pL += gradx2[idx0 + 0] * rz; // Velocity u double uL = q[idx0 + 1] + gradx0[idx0 + 1] * rx; uL += gradx1[idx0 + 1] * ry; uL += gradx2[idx0 + 1] * rz; // Velocity v double vL = q[idx0 + 2] + gradx0[idx0 + 2] * rx; vL += gradx1[idx0 + 2] * ry; vL += gradx2[idx0 + 2] * rz; // Velocity w double wL = q[idx0 + 3] + gradx0[idx0 + 3] * rx; wL += gradx1[idx0 + 3] * ry; wL += gradx2[idx0 + 3] * rz; double ubarL = xn * uL; ubarL += yn * vL; ubarL += zn * wL; rx = xmean - xyz0[node1]; ry = ymean - xyz1[node1]; rz = zmean - xyz2[node1]; // P double pR = q[idx1 + 0] + gradx0[idx1 + 0] * rx; pR += gradx1[idx1 + 0] * ry; pR += gradx2[idx1 + 0] * rz; // Velocity u double uR = q[idx1 + 1] + gradx0[idx1 + 1] * rx; uR += gradx1[idx1 + 1] * ry; uR += gradx2[idx1 + 1] * rz; // Velocity v double vR = q[idx1 + 2] + gradx0[idx1 + 2] * rx; vR += gradx1[idx1 + 2] * ry; vR += gradx2[idx1 + 2] * rz; // Velocity w double wR = q[idx1 + 3] + gradx0[idx1 + 3] * rx; wR += gradx1[idx1 + 3] * ry; wR += gradx2[idx1 + 3] * rz; double ubarR = xn * uR; ubarR += yn * vR; ubarR += zn * wR; /* Compute averages */ //double p = 0.5f * (pL + 
pR); double u = 0.5f * (uL + uR); double v = 0.5f * (vL + vR); double w = 0.5f * (wL + wR); double ubar = xn * u; ubar += yn * v; ubar += zn * w; double phi1 = xn * B; phi1 += u * ubar; double phi2 = yn * B; phi2 += v * ubar; double phi3 = zn * B; phi3 += w * ubar; double phi4 = Y2 * phi3; phi4 -= Z2 * phi2; double phi5 = Z2 * phi1; phi5 -= X2 * phi3; double phi6 = X2 * phi2; phi6 -= Y2 * phi1; double phi7 = Z1 * phi2; phi7 -= Y1 * phi3; double phi8 = X1 * phi3; phi8 -= Z1 * phi1; double phi9 = Y1 * phi1; phi9 -= X1 * phi2; double c2 = ubar * ubar + B; double c = sqrt(c2); /* Now compute eigenvalues, eigenvectors, and strengths */ double eig1 = fabs(ubar); double eig2 = fabs(ubar); double eig3 = fabs(ubar + c); double eig4 = fabs(ubar - c); double dp = pR - pL; double du = uR - uL; double dv = vR - vL; double dw = wR - wL; /* Components of T(inverse) */ double ti11 = u * phi4; ti11 += v * phi5; ti11 += w * phi6; ti11 = -ti11 / B; double ti21 = u * phi7; ti21 += v * phi8; ti21 += w * phi9; ti21 = -ti21 / B; double ti31 = 0.5f * (c - ubar); ti31 /= B; double ti41 = -0.5f * (c + ubar); ti41 /= B; /* jumps (T(inverse) * dq) */ double dv1 = ti11 * dp; dv1 += phi4 * du; dv1 += phi5 * dv; dv1 += phi6 * dw; dv1 /= c2; double dv2 = ti21 * dp; dv2 += phi7 * du; dv2 += phi8 * dv; dv2 += phi9 * dw; dv2 /= c2; double dv3 = 2.f * ti31 * dp; dv3 += xn * du; dv3 += yn * dv; dv3 += zn * dw; dv3 *= 0.5f / c2; double dv4 = 2.f * ti41 * dp; dv4 += xn * du; dv4 += yn * dv; dv4 += zn * dw; dv4 *= 0.5f / c2; /* Now get elements of T */ double r13 = c * B; double r23 = u * (ubar + c); r23 += xn * B; double r33 = v * (ubar + c); r33 += yn * B; double r43 = w * (ubar + c); r43 += zn * B; double r14 = -c * B; double r24 = u * (ubar - c); r24 += xn * B; double r34 = v * (ubar - c); r34 += yn * B; double r44 = w * (ubar - c); r44 += zn * B; /* Calculate T* |lambda| * T(inverse) */ double t1 = eig3 * r13 * dv3 + eig4 * r14 * dv4; double t2 = eig1 * X1 * dv1 + eig2 * X2 * dv2; t2 += eig3 * r23 * 
dv3 + eig4 * r24 * dv4; double t3 = eig1 * Y1 * dv1 + eig2 * Y2 * dv2; t3 += eig3 * r33 * dv3 + eig4 * r34 * dv4; double t4 = eig1 * Z1 * dv1 + eig2 * Z2 * dv2; t4 += eig3 * r43 * dv3 + eig4 * r44 * dv4; /* Modify to calculate .5(fl +fr) from nodes instead of extrapolated ones */ double fluxp1 = ln * B * ubarL; double fluxp2 = ln * (uL * ubarL + xn * pL); double fluxp3 = ln * (vL * ubarL + yn * pL); double fluxp4 = ln * (wL * ubarL + zn * pL); /* Now the right side */ double fluxm1 = ln * B * ubarR; double fluxm2 = ln * (uR * ubarR + xn * pR); double fluxm3 = ln * (vR * ubarR + yn * pR); double fluxm4 = ln * (wR * ubarR + zn * pR); double res1 = 0.5f * (fluxp1 + fluxm1 - ln * t1); double res2 = 0.5f * (fluxp2 + fluxm2 - ln * t2); double res3 = 0.5f * (fluxp3 + fluxm3 - ln * t3); double res4 = 0.5f * (fluxp4 + fluxm4 - ln * t4); if(part[node0] == t) { r[idx0 + 0] = r[idx0 + 0] + res1; r[idx0 + 1] = r[idx0 + 1] + res2; r[idx0 + 2] = r[idx0 + 2] + res3; r[idx0 + 3] = r[idx0 + 3] + res4; } if(part[node1] == t) { r[idx1 + 0] = r[idx1 + 0] - res1; r[idx1 + 1] = r[idx1 + 1] - res2; r[idx1 + 2] = r[idx1 + 2] - res3; r[idx1 + 3] = r[idx1 + 3] - res4; } } } uint32_t i; for(i = 0; i < snfc; i++) { uint32_t if0 = snfic[i]; uint32_t if1 = snfic[i+1]; uint32_t j; #pragma omp parallel for for(j = if0; j < if1; j++) { uint32_t node0 = sn0[j]; uint32_t node1 = sn1[j]; uint32_t node2 = sn2[j]; double p1 = q[bsz * node0]; double p2 = q[bsz * node1]; double p3 = q[bsz * node2]; double ax = xyz0[node1] - xyz0[node0]; double ay = xyz1[node1] - xyz1[node0]; double az = xyz2[node1] - xyz2[node0]; double bx = xyz0[node2] - xyz0[node0]; double by = xyz1[node2] - xyz1[node0]; double bz = xyz2[node2] - xyz2[node0]; /* Normal points away from grid interior. Magnitude is 1/3 area of surface triangle. 
*/ double xn = ay * bz; xn -= az * by; xn *= MAG1; double yn = ax * bz; yn -= az * bx; yn *= MAG0; double zn = ax * by; zn -= ay * bx; zn *= MAG1; double pa = 0.125f * (p2 + p3); pa += 0.75f * p1; double pb = 0.125f * (p3 + p1); pb += 0.75f * p2; double pc = 0.125f * (p1 + p2); pc += 0.75f * p3; uint32_t idx; idx = bsz * node0; r[idx + 1] += xn * pa; r[idx + 2] += yn * pa; r[idx + 3] += zn * pa; idx = bsz * node1; r[idx + 1] += xn * pb; r[idx + 2] += yn * pb; r[idx + 3] += zn * pb; idx = bsz * node2; r[idx + 1] += xn * pc; r[idx + 2] += yn * pc; r[idx + 3] += zn * pc; } } /* Do the free boundaries */ #pragma omp parallel for for(i = 0; i < nfnodes; i++) { uint32_t n = nfptr[i]; /* Get normal and "other" 2 vectors. Remember that fxn,fyn and fzn has the magnitude of the face contained in it. */ double xn = f_xyz0[i]; double yn = f_xyz1[i]; double zn = f_xyz2[i]; double area = xn * xn; area += yn * yn; area += zn * zn; area = sqrt(area); xn /= area; yn /= area; zn /= area; /* Now lets get our other 2 vectors For first vector, use {1,0,0} and subtract off the component in the direction of the face normal. 
If the inner product of {1,0,0} is close to unity, use {0,1,0} */ double X1, Y1, Z1; double dot = xn; if(fabs(dot) < 0.95f) { X1 = 1.f - dot * xn; Y1 = -dot * yn; Z1 = -dot * zn; } else { dot = yn; X1 = -dot * xn; Y1 = 1.f - dot * yn; Z1 = -dot * zn; } /* Normalize the first vector (V1) */ double size = X1 * X1; size += Y1 * Y1; size += Z1 * Z1; size = sqrt(size); X1 /= size; Y1 /= size; Z1 /= size; /* Take cross-product of normal with V1 to get V2 */ double X2 = yn * Z1; X2 -= zn * Y1; double Y2 = zn * X1; Y2 -= xn * Z1; double Z2 = xn * Y1; Z2 -= yn * X1; /* Calculate elements of T and T(inverse) evaluated at free-stream */ double ubar0 = xn * U; ubar0 += yn * V; ubar0 += zn * W; double c20 = ubar0 * ubar0 + B; double c0 = sqrt(c20); double phi1 = xn * B; phi1 += U * ubar0; double phi2 = yn * B; phi2 += V * ubar0; double phi3 = zn * B; phi3 += W * ubar0; double phi4 = Y2 * phi3; phi4 -= Z2 * phi2; double phi5 = Z2 * phi1; phi5 -= X2 * phi3; double phi6 = X2 * phi2; phi6 -= Y2 * phi1; double phi7 = Z1 * phi2; phi7 -= Y1 * phi3; double phi8 = X1 * phi3; phi8 -= Z1 * phi1; double phi9 = Y1 * phi1; phi9 -= X1 * phi2; double t13 = c0 * B; double t23 = U * (ubar0 + c0); t23 += xn * B; double t33 = V * (ubar0 + c0); t33 += yn * B; double t43 = W * (ubar0 + c0); t43 += zn * B; double t14 = -c0 * B; double t24 = U * (ubar0 - c0); t24 += xn * B; double t34 = V * (ubar0 - c0); t34 += yn * B; double t44 = W * (ubar0 - c0); t44 += zn * B; double ti11 = U * phi4; ti11 += V * phi5; ti11 += W * phi6; ti11 = -ti11/B; double ti21 = U * phi7; ti21 += V * phi8; ti21 += W * phi9; ti21 = -ti21/B; double ti31 = 0.5f * (c0 - ubar0); ti31 /= B; double ti41 = -0.5f * (c0 + ubar0); ti41 /= B; /* Now, get the variables on the "inside" */ double pi = q[bsz * n + 0]; double ui = q[bsz * n + 1]; double vi = q[bsz * n + 2]; double wi = q[bsz * n + 3]; double un = xn * ui; un += yn * vi; un += zn * wi; /* If ubar is negative, take the reference condition from outside */ double pr, ur, vr, wr; 
/* --- continuation of the free-boundary loop inside _KRN_ComputeFlux ---
   Characteristic far-field boundary: if the normal velocity un points out
   of the domain (un > 0), the reference state is taken from the interior
   node (pi,ui,vi,wi); otherwise from the free stream (P,U,V,W). */
if(un > 0.f) { pr = pi; ur = ui; vr = vi; wr = wi; }
else { pr = P; ur = U; vr = V; wr = W; }
/* Set rhs: characteristic variables built from the reference state
   (rhs1, rhs2), the interior state (rhs3) and the free stream (rhs4). */
double rhs1 = ti11 * pr; rhs1 += phi4 * ur; rhs1 += phi5 * vr; rhs1 += phi6 * wr; rhs1 /= c20;
double rhs2 = ti21 * pr; rhs2 += phi7 * ur; rhs2 += phi8 * vr; rhs2 += phi9 * wr; rhs2 /= c20;
double rhs3 = 2.f * ti31 * pi; rhs3 += xn * ui; rhs3 += yn * vi; rhs3 += zn * wi; rhs3 = 0.5f * rhs3 / c20;
double rhs4 = 2.f * ti41 * P; rhs4 += xn * U; rhs4 += yn * V; rhs4 += zn * W; rhs4 = 0.5f * rhs4 / c20;
/* Now do matrix multiplication to get values on boundary:
   (pb,ub,vb,wb) = T * rhs, i.e. transform back from characteristic to
   primitive variables. */
double pb = t13 * rhs3; pb += t14 * rhs4;
double ub = X1 * rhs1; ub += X2 * rhs2; ub += t23 * rhs3; ub += t24 * rhs4;
double vb = Y1 * rhs1; vb += Y2 * rhs2; vb += t33 * rhs3; vb += t34 * rhs4;
double wb = Z1 * rhs1; wb += Z2 * rhs2; wb += t43 * rhs3; wb += t44 * rhs4;
/* Normal velocity of the reconstructed boundary state. */
double ubar = xn * ub; ubar += yn * vb; ubar += zn * wb;
/* Accumulate the boundary flux into the residual of node n; `area` is the
   face-normal magnitude factored out earlier. */
uint32_t idx = (unsigned int) bsz * n;
r[idx + 0] += area * B * ubar;
r[idx + 1] += area * (ub * ubar + xn * pb);
r[idx + 2] += area * (vb * ubar + yn * pb);
r[idx + 3] += area * (wb * ubar + zn * pb);
} }

/* Public entry point: unpacks the GEOMETRY structure into the flat arrays
 * expected by _KRN_ComputeFlux (boundary faces, edge connectivity, edge
 * normals, node coordinates/partition, surface triangles, edge weights and
 * the precomputed gradients), runs the kernel, and logs the elapsed time
 * under KERNEL_FLUX.
 *   q    : packed solution vector (bsz values per node)
 *   grad : nodal solution gradients (x/y/z components)
 *   r    : residual vector, accumulated in place                      */
void ComputeFlux(const GEOMETRY *g, const double *q, GRADIENT *grad, double *r)
{
    BENCH start_bench = rdbench();
    _KRN_ComputeFlux(
        g->b->f->sz, g->c->b, g->b->f->nptr,
        g->b->f->xyz->x0, g->b->f->xyz->x1, g->b->f->xyz->x2,
        g->s->i, g->n->part,
        g->e->eptr->n0, g->e->eptr->n1,
        g->e->xyzn->x0, g->e->xyzn->x1, g->e->xyzn->x2, g->e->xyzn->x3,
        q, g->c->sz, g->t->sz, g->t->i,
        g->n->xyz->x0, g->n->xyz->x1, g->n->xyz->x2,
        g->b->fc->fptr->n0, g->b->fc->fptr->n1, g->b->fc->fptr->n2,
        g->e->w->w0->x0, g->e->w->w0->x1, g->e->w->w0->x2,
        g->e->w->w1->x0, g->e->w->w1->x1, g->e->w->w1->x2,
        grad->x0, grad->x1, grad->x2,
        r
    );
    fun3d_log(start_bench, KERNEL_FLUX);
}
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 16; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
GB_binop__isne_uint16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): changes belong in the Generator/ template, then regenerate.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__isne_uint16
// A.*B function (eWiseMult):       GB_AemultB__isne_uint16
// A*D function (colscale):         GB_AxD__isne_uint16
// D*A function (rowscale):         GB_DxB__isne_uint16
// C+=B function (dense accum):     GB_Cdense_accumB__isne_uint16
// C+=b function (dense accum):     GB_Cdense_accumb__isne_uint16
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__isne_uint16
// C=scalar+B                       GB_bind1st__isne_uint16
// C=scalar+B'                      GB_bind1st_tran__isne_uint16
// C=A+scalar                       GB_bind2nd__isne_uint16
// C=A'+scalar                      GB_bind2nd_tran__isne_uint16

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij != bij)

// The macros below configure the generic template files #include'd by each
// wrapper; the templates contain the actual loops.

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x != y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNE || GxB_NO_UINT16 || GxB_NO_ISNE_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (ISNE is none of these, so this variant is compiled out.)

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__isne_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__isne_uint16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__isne_uint16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return, emitted by the generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__isne_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__isne_uint16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__isne_uint16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__isne_uint16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__isne_uint16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t  x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t bij = Bx [p] ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__isne_uint16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t  y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t aij = Ax [p] ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = Ax [pA] ; \
    Cx [pC] = (x != aij) ; \
}

GrB_Info GB_bind1st_tran__isne_uint16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any later template inclusion
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = Ax [pA] ; \
    Cx [pC] = (aij != y) ; \
}

GrB_Info GB_bind2nd_tran__isne_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
papi_wrap.c
/*
 * Copyright 2016 ARTED developers
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifdef ARTED_USE_PAPI

#include <papi.h>
#include <stdio.h>
#include <stdlib.h>   /* FIX: malloc/free/exit were used with no prototype */
#include <omp.h>
#include <mpi.h>

/* One PAPI event set per OpenMP thread. */
int *EventSet;
/* Summed SP/DP operation counts; valid on MPI rank 0 after papi_end_(). */
double values[2];

/* Initialize PAPI and start SP/DP floating-point counters on every
 * OpenMP thread.  Exits the process on any PAPI failure. */
void papi_begin_()
{
  int i;

  if (PAPI_library_init(PAPI_VER_CURRENT) != PAPI_VER_CURRENT) {
    fprintf(stderr, "PAPI library init error!\n");
    exit(1);
  }

  /* BUG FIX: PAPI_thread_init() must be given a function returning a
   * UNIQUE id for the calling thread.  The original passed
   * omp_get_num_threads (the thread COUNT), which registers every
   * thread under the same id. */
  if (PAPI_thread_init((unsigned long (*)(void)) omp_get_thread_num) != PAPI_OK) {
    fprintf(stderr, "PAPI thread init error.\n");
    exit(1);
  }

  if (PAPI_num_counters() < 2) {
    fprintf(stderr, "No hardware counters here, or PAPI not supported.\n");
    exit(1);
  }

  EventSet = (int *) malloc(sizeof(int) * omp_get_max_threads());
  for (i = 0; i < omp_get_max_threads(); i++)
    EventSet[i] = PAPI_NULL;

#pragma omp parallel
  {
    int ret;                          /* FIX: private; was a shared int (data race) */
    int t = omp_get_thread_num();
    PAPI_create_eventset(&EventSet[t]);
    PAPI_add_event(EventSet[t], PAPI_SP_OPS);
    PAPI_add_event(EventSet[t], PAPI_DP_OPS);
    if ((ret = PAPI_start(EventSet[t])) != PAPI_OK) {
      fprintf(stderr, "PAPI failed to start counters: %s\n", PAPI_strerror(ret));
      exit(1);
    }
  }

  values[0] = values[1] = 0.0;
}

/* Stop the per-thread counters, sum them across threads and MPI ranks
 * into values[] on rank 0, and shut PAPI down. */
void papi_end_()
{
  long long v0 = 0, v1 = 0;
  double vin[2];

#pragma omp parallel reduction(+:v0,v1)
  {
    int ret;
    long long v[2];                   /* BUG FIX: was one buffer shared by all
                                         threads (concurrent writes, data race) */
    int t = omp_get_thread_num();
    if ((ret = PAPI_stop(EventSet[t], v)) != PAPI_OK) {
      fprintf(stderr, "PAPI failed to read counters: %s\n", PAPI_strerror(ret));
      exit(1);
    }
    v0 += v[0];
    v1 += v[1];
  }

  vin[0] = (double) v0;
  vin[1] = (double) v1;
  MPI_Reduce(vin, values, 2, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

  PAPI_shutdown();
  free(EventSet);
  EventSet = NULL;                    /* guard against accidental reuse */
}

/* Print the collected counts and the derived GFLOPS rate for the given
 * elapsed time (seconds).  Meaningful on rank 0 only. */
void papi_result_(double *time)
{
  printf("SP FLOP = %f\n", values[0]);
  printf("DP FLOP = %f\n", values[1]);
  printf("GFLOPS = %.2f\n", ((values[0] + values[1]) / *time) * 1.0e-9);
}

#else

/* No-op stubs when PAPI support is compiled out. */
void papi_begin_() {}
void papi_end_() {}
void papi_result_(double* time) {}

#endif
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,16);t1++) { lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32)); ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(32*t2-Nz-4,8)),2*t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(16*t1+Ny+29,8)),floord(32*t2+Ny+28,8)),floord(32*t1-32*t2+Nz+Ny+27,8));t3++) { for (t4=max(max(max(0,ceild(t1-3,4)),ceild(32*t2-Nz-60,64)),ceild(8*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(16*t1+Nx+29,64)),floord(32*t2+Nx+28,64)),floord(8*t3+Nx+4,64)),floord(32*t1-32*t2+Nz+Nx+27,64));t4++) { for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),8*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),8*t3+6),64*t4+62),32*t1-32*t2+Nz+29);t5++) { for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(64*t4,t5+1); ubv=min(64*t4+63,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON 
#pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
GB_binop__isgt_int64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):       GB (_AaddB__isgt_int64)
// A.*B function (eWiseMult):     GB (_AemultB_01__isgt_int64)
// A.*B function (eWiseMult):     GB (_AemultB_02__isgt_int64)
// A.*B function (eWiseMult):     GB (_AemultB_03__isgt_int64)
// A.*B function (eWiseMult):     GB (_AemultB_bitmap__isgt_int64)
// A*D function (colscale):       GB (_AxD__isgt_int64)
// D*A function (rowscale):       GB (_DxB__isgt_int64)
// C+=B function (dense accum):   GB (_Cdense_accumB__isgt_int64)
// C+=b function (dense accum):   GB (_Cdense_accumb__isgt_int64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isgt_int64)
// C=scalar+B     GB (_bind1st__isgt_int64)
// C=scalar+B'    GB (_bind1st_tran__isgt_int64)
// C=A+scalar     GB (_bind2nd__isgt_int64)
// C=A'+scalar    GB (_bind2nd_tran__isgt_int64)

// C type:   int64_t
// A type:   int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij > bij)

// NOTE (generated code): every function below is a thin wrapper that sets up
// typed pointers/scalars and then #include's a shared algorithm template.
// The macros defined here specialize those templates for ISGT on int64_t.

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int64_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int64_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x > y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGT || GxB_NO_INT64 || GxB_NO_ISGT_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISGT is not one of these, so this variant is compiled out.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isgt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isgt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isgt_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the template path above already returned (generated code)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isgt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isgt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isgt_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__isgt_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isgt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isgt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isgt_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isgt_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap (GBB is true for full matrices)
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isgt_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t   y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (GBB is true for full matrices)
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x > aij) ;                       \
}

GrB_Info GB (_bind1st_tran__isgt_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij > y) ;                       \
}

GrB_Info GB (_bind2nd_tran__isgt_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
gm_map.h
#ifndef GM_MAP_H_ #define GM_MAP_H_ #include <map> #include "gm_internal.h" #include "gm_limits.h" #include "gm_lock.h" using namespace std; //map-interface template<class Key, class Value> class gm_map { public: virtual ~gm_map() { } ; virtual bool hasKey_seq(const Key key) = 0; virtual bool hasKey_par(const Key key) { return hasKey_seq(key); } /** * Return the value that has been set for the key - if none has been specified, the defaultValue is returned */ virtual Value getValue(const Key key) = 0; virtual void setValue_par(const Key key, Value value) = 0; virtual void setValue_seq(const Key key, Value value) = 0; /** * Sets the value mapped to the given key to the default value */ virtual void removeKey_par(const Key key) = 0; virtual void removeKey_seq(const Key key) = 0; /** * Returns true if the key corresponds to the highest value in the map. * If there has no value been set in the map, false is returned */ virtual bool hasMaxValue_seq(const Key key) = 0; virtual bool hasMaxValue_par(const Key key) { return hasMaxValue_seq(key); } /** * Returns true if the key corresponds to the lowest value in the map. * If there has no value been set in the map, false is returned */ virtual bool hasMinValue_seq(const Key key) = 0; virtual bool hasMinValue_par(const Key key) { return hasMinValue_seq(key); } /** * Returns the key that corresponds to the highest value in the map. * If there has no value been set in the map, the behavior is unspecified. */ virtual Key getMaxKey_seq() = 0; virtual Key getMaxKey_par() { return getMaxKey_seq(); } /** * Returns the key that corresponds to the lowest value in the map. * If there has no value been set in the map, the behavior is unspecified. */ virtual Key getMinKey_seq() = 0; virtual Key getMinKey_par() { return getMinKey_seq(); } /** * Returns the highest value in the map. * If there has no value been set in the map, the behavior is unspecified. 
*/ virtual Value getMaxValue_seq() = 0; virtual Value getMaxValue_par() { return getMaxValue_seq(); } /** * Returns the lowest value in the map. * If there has no value been set in the map, the behavior is unspecified. */ virtual Value getMinValue_seq() = 0; virtual Value getMinValue_par() { return getMinValue_seq(); } /** * Adds the value of summand to the value mapped to key and returns the result. * This operation is atomic. If no value if mapped to key, key is mapped to summand * and summand is returned. */ virtual Value changeValueAtomicAdd(const Key key, const Value summand) = 0; /** * This operation is equivalent to 'changeValueAtomicAdd(key, -1 * subtrahend)' */ virtual Value changeValueAtomicSubtract(const Key key, const Value subtrahend) { return changeValueAtomicAdd(key, -1 * subtrahend); } virtual size_t size() = 0; virtual void clear() = 0; protected: virtual Value getDefaultValue() = 0; static bool compare_smaller(Value a, Value b) { return a < b; } static bool compare_greater(Value a, Value b) { return a > b; } static Value max(Value a, Value b) { return std::max<Value>(a, b); } static Value min(Value a, Value b) { return std::min<Value>(a, b); } }; template<class Key, class Value> class gm_map_small : public gm_map<Key, Value> { private: map<Key, Value> data; const Value defaultValue; gm_spinlock_t lock; typedef typename map<Key, Value>::iterator Iterator; template<class Function> Value getValue_generic(Function compare) { assert(size() > 0); Iterator iter = data.begin(); Value value = iter->second; for (iter++; iter != data.end(); iter++) { if (compare(iter->second, value)) { value = iter->second; } } return value; } template<class Function> Key getKey_generic(Function compare) { assert(size() > 0); Iterator iter = data.begin(); Key key = iter->first; Value value = iter->second; for (iter++; iter != data.end(); iter++) { if (compare(iter->second, value)) { key = iter->first; value = iter->second; } } return key; } template<class Function> bool 
hasValue_generic(Function compare, const Key key) { if (size() == 0 || !hasKey_seq(key)) return false; Value value = data[key]; bool result = true; for (Iterator iter = data.begin(); iter != data.end(); iter++) { if (compare(iter->second, value)) result = false; } return result; } protected: Value getDefaultValue() { return defaultValue; } public: gm_map_small(Value defaultValue) : lock(0), defaultValue(defaultValue) { } ~gm_map_small() { } bool hasKey_seq(const Key key) { return data.find(key) != data.end(); } Value getValue(const Key key) { if (hasKey_seq(key)) { return data[key]; } else { return defaultValue; } } void setValue_par(const Key key, Value value) { gm_spinlock_acquire(&lock); setValue_seq(key, value); gm_spinlock_release(&lock); } void setValue_seq(const Key key, Value value) { data[key] = value; } void removeKey_par(const Key key) { gm_spinlock_acquire(&lock); removeKey_seq(key); gm_spinlock_release(&lock); } void removeKey_seq(const Key key) { data.erase(key); } bool hasMaxValue_seq(const Key key) { return hasValue_generic(&gm_map<Key, Value>::compare_greater, key); } bool hasMinValue_seq(const Key key) { return hasValue_generic(&gm_map<Key, Value>::compare_smaller, key); } Key getMaxKey_seq() { return getKey_generic(&gm_map<Key, Value>::compare_greater); } Key getMinKey_seq() { return getKey_generic(&gm_map<Key, Value>::compare_smaller); } Value getMaxValue_seq() { return getValue_generic(&gm_map<Key, Value>::compare_greater); } Value getMinValue_seq() { return getValue_generic(&gm_map<Key, Value>::compare_smaller); } Value changeValueAtomicAdd(const Key key, const Value summand) { Value newValue = summand; gm_spinlock_acquire(&lock); if(hasKey_seq(key)) newValue += data[key]; data[key] = summand; gm_spinlock_release(&lock); return newValue; } size_t size() { return data.size(); } void clear() { data.clear(); } }; template<class Key, class Value> class gm_map_large : public gm_map<Key, Value> { private: const size_t size_; const Value 
defaultValue; Value* const data; bool * const valid; template<class Function> Value getValue_generic_seq(Function func, const Value initialValue) { Value value = initialValue; #pragma omp parallel { Value value_private = value; #pragma omp for nowait for (Key i = 0; i < size_; i++) { if (valid[i]) value_private = func(value_private, data[i]); } // reduction { Value value_old; Value value_new; do { value_old = value; value_new = func(value_old, value_private); if (value_old == value_new) break; } while (_gm_atomic_compare_and_swap(&(value), value_old, value_new) == false); } } return value; } template<class Function> Value getValue_generic_par(Function func, const Value initialValue) { Value value = initialValue; for (Key i = 0; i < size_; i++) { if (valid[i]) value = func(value, data[i]); } return value; } template<class Function> Key getKey_generic_par(Function compare, const Value initialValue) { Value value = initialValue; Key key = 0; for (Key i = 0; i < size_; i++) { if (valid[i] && compare(data[i], value)) { value = data[i]; key = i; } } return key; } template<class Function> Key getKey_generic_seq(Function compare, const Value initialValue) { Value value = initialValue; Key key = 0; #pragma omp parallel { Value value_private = value; Key key_private = key; #pragma omp for nowait for (Key i = 0; i < size_; i++) { if (valid[i] && compare(data[i], value_private)) { value_private = data[i]; key_private = i; } } // reduction if(compare(value_private, value)) { #pragma omp critical { if(compare(value_private, value)) { value = value_private; key = key_private; } } } } return key; } template<class Function> bool hasValue_generic_seq(Function compare, const Key key) { if (size() == 0 || !hasKey_seq(key)) return false; Value value = data[key]; bool result = true; #pragma omp parallel for for (int i = 0; i < size_; i++) if (valid[i] && compare(data[i], value)) result = false; return result; } template<class Function> bool hasValue_generic_par(Function compare, const 
Key key) { if (size() == 0 || !hasKey_seq(key)) return false; Value value = data[key]; for (int i = 0; i < size_; i++) if (valid[i] && compare(data[i], value)) return false; } protected: Value getDefaultValue() { return defaultValue; } public: gm_map_large(size_t size, Value defaultValue) : size_(size), data(new Value[size]), valid(new bool[size]), defaultValue(defaultValue) { #pragma omp parallel for for (int i = 0; i < size; i++) { valid[i] = false; } } ~gm_map_large() { delete[] data; delete[] valid; } bool hasKey_seq(const Key key) { return key < size_ && valid[key]; } Value getValue(const Key key) { if (hasKey_seq(key)) return data[key]; else return defaultValue; } void setValue_par(const Key key, Value value) { setValue_seq(key, value); } void setValue_seq(const Key key, Value value) { data[key] = value; valid[key] = true; } void removeKey_par(const Key key) { removeKey_seq(key); } void removeKey_seq(const Key key) { valid[key] = false; } bool hasMaxValue_seq(const Key key) { return hasValue_generic_seq(&gm_map<Key, Value>::compare_greater, key); } bool hasMaxValue_par(const Key key) { return hasValue_generic_par(&gm_map<Key, Value>::compare_greater, key); } bool hasMinValue_seq(const Key key) { return hasValue_generic_seq(&gm_map<Key, Value>::compare_smaller, key); } bool hasMinValue_par(const Key key) { return hasValue_generic_par(&gm_map<Key, Value>::compare_smaller, key); } Key getMaxKey_seq() { return getKey_generic_seq(&gm_map<Key, Value>::compare_greater, gm_get_min<Value>()); } Key getMaxKey_par() { return getKey_generic_par(&gm_map<Key, Value>::compare_greater, gm_get_min<Value>()); } Key getMinKey_seq() { return getKey_generic_seq(&gm_map<Key, Value>::compare_smaller, gm_get_max<Value>()); } Key getMinKey_par() { return getKey_generic_par(&gm_map<Key, Value>::compare_smaller, gm_get_max<Value>()); } Value getMaxValue_seq() { return getValue_generic_seq(&gm_map<Key, Value>::max, gm_get_min<Value>()); } Value getMaxValue_par() { return 
getValue_generic_par(&gm_map<Key, Value>::max, gm_get_min<Value>()); } Value getMinValue_seq() { return getValue_generic_seq(&gm_map<Key, Value>::min, gm_get_max<Value>()); } Value getMinValue_par() { return getValue_generic_par(&gm_map<Key, Value>::min, gm_get_max<Value>()); } Value changeValueAtomicAdd(const Key key, const Value summand) { Value oldValue; Value newValue; do { oldValue = data[key]; newValue = valid[key] ? (oldValue + summand) : summand; } while (_gm_atomic_compare_and_swap(data + key, oldValue, newValue) == false); valid[key] = true; return newValue; } size_t size() { size_t result = 0; for(int i = 0; i < size_; i++) result += valid[i]; return result; } void clear() { #pragma omp parallel for for (Key key = 0; key < size(); key++) { valid[key] = false; } } }; // Map is implemnted with set of inner-maps template<class Key, class Value> class gm_map_medium : public gm_map<Key, Value> { private: const int innerSize; const Value defaultValue; map<Key, Value>* innerMaps; gm_spinlock_t* locks; typedef typename map<Key, Value>::iterator Iterator; const uint32_t bitmask; inline uint32_t getPositionFromKey(const Key key) { uint32_t P = 0; if (sizeof(Key) == 1) { const uint8_t* c = (const uint8_t*) &key; P = *c; } else if (sizeof(Key) == 2) { const uint16_t* c = (const uint16_t*) &key; P = *c; } else if (sizeof(Key) >= 4) { const uint32_t* c = (const uint32_t*) &key; P = *c; } return P & bitmask; } template<class FunctionCompare, class FunctionMinMax> Value getValue_generic_par(FunctionCompare compare, FunctionMinMax func, const Value initialValue) { Value value = initialValue; for (int i = 0; i < innerSize; i++) { if (innerMaps[i].size() > 0) { for (Iterator iter = innerMaps[i].begin(); iter != innerMaps[i].end(); iter++) { if (compare(iter->second, value)) { value = iter->second; } } } } return value; } template<class FunctionCompare, class FunctionMinMax> Value getValue_generic_seq(FunctionCompare compare, FunctionMinMax func, const Value initialValue) { 
Value value = initialValue; #pragma omp parallel { Value value_private = initialValue; #pragma omp for nowait for (int i = 0; i < innerSize; i++) { if (innerMaps[i].size() > 0) value_private = func(value_private, getValueAtPosition_generic(i, compare)); } // reduction { Value value_old; Value value_new; do { value_old = value; value_new = func(value_old, value_private); if (value_old == value_new) break; } while (_gm_atomic_compare_and_swap(&(value), value_old, value_new) == false); } } return value; } template<class Function> Value getValueAtPosition_generic(int position, Function compare) { Iterator iter = innerMaps[position].begin(); Value value = iter->second; for (iter++; iter != innerMaps[position].end(); iter++) { if (compare(iter->second, value)) { value = iter->second; } } return value; } template<class Function> Key getKey_generic_seq(Function compare, const Value initialValue) { Key key = 0; Value value = initialValue; #pragma omp parallel for for(int i = 0; i < innerSize; i++) { if(innerMaps[i].size() > 0) { Iterator iter = getKeyAtPosition_generic(i, compare); Key privateKey = iter->first; Value privateValue = iter->second; if(compare(privateValue, value)) { #pragma omp critical if(compare(privateValue, value)) { value = privateValue; key = privateKey; } } } } return key; } template<class Function> Key getKey_generic_par(Function compare, const Value initialValue) { Key key = 0; Value value = initialValue; for(int i = 0; i < innerSize; i++) { if(innerMaps[i].size() > 0) { for (Iterator iter = innerMaps[i].begin(); iter != innerMaps[i].end(); iter++) { if(compare(iter->second, value)) { value = iter->second; key = iter->first; } } } } return key; } template<class Function> Iterator getKeyAtPosition_generic(int position, Function compare) { Iterator iter = innerMaps[position].begin(); Iterator currentBest = iter; for (iter++; iter != innerMaps[position].end(); iter++) { if (compare(iter->second, currentBest->second)) { currentBest = iter; } } return 
currentBest; } template<class Function> bool hasValue_generic_par(Function compare, const Key key) { uint32_t position = getPositionFromKey(key); Value reference = getValueFromPosition(position, key); for(int i = 0; i < innerSize; i++) { if (innerMaps[i].size() > 0) { for (Iterator iter = innerMaps[i].begin(); iter != innerMaps[i].end(); iter++) { if (compare(iter->second, reference)) return false; } } } return true; } template<class Function> bool hasValue_generic_seq(Function compare, const Key key) { bool result = true; uint32_t position = getPositionFromKey(key); Value reference = getValueFromPosition(position, key); #pragma omp parallel for for(int i = 0; i < innerSize; i++) { bool tmp = hasValueAtPosition_generic(i, compare, reference); if(!tmp) result = false; } return result; } template<class Function> bool hasValueAtPosition_generic(int position, Function compare, const Value reference) { map<Key, Value>& currentMap = innerMaps[position]; if (currentMap.size() == 0) return false; for (Iterator iter = currentMap.begin(); iter != currentMap.end(); iter++) { if (compare(iter->second, reference)) return false; } } bool positionHasKey(int position, const Key key) { return innerMaps[position].find(key) != innerMaps[position].end(); } Value getValueFromPosition(int position, const Key key) { Iterator iter = innerMaps[position].find(key); if(iter != innerMaps[position].end()) return iter->second; else return defaultValue; } void setValueAtPosition(int position, const Key key, Value value) { innerMaps[position][key] = value; } void removeKeyAtPosition(int position, const Key key) { innerMaps[position].erase(key); } static unsigned getBitMask(int innerSize) { unsigned tmpMask = innerSize - 1; return tmpMask; } static int getSize(int threadCount) { int tmpSize = 32; while(tmpSize < threadCount) { tmpSize *= 2; } // we will use only up to 4B for positioninig assert(tmpSize <= 1024*1024*1024); return tmpSize; } protected: Value getDefaultValue() { return defaultValue; 
} public: gm_map_medium(int threadCount, Value defaultValue) : innerSize(getSize(threadCount)), bitmask(getBitMask(innerSize)), defaultValue(defaultValue) { locks = new gm_spinlock_t[innerSize]; innerMaps = new map<Key, Value>[innerSize]; #pragma omp parallel for for(int i = 0; i < innerSize; i++) { locks[i] = 0; } } ~gm_map_medium() { delete[] innerMaps; delete[] locks; } bool hasKey_seq(const Key key) { uint32_t position = getPositionFromKey(key); return positionHasKey(position, key); } Value getValue(const Key key) { uint32_t position = getPositionFromKey(key); return getValueFromPosition(position, key); } void setValue_par(const Key key, Value value) { uint32_t position = getPositionFromKey(key); gm_spinlock_acquire(locks + position); setValueAtPosition(position, key, value); gm_spinlock_release(locks + position); } void setValue_seq(const Key key, Value value) { uint32_t position = getPositionFromKey(key); setValueAtPosition(position, key, value); } void removeKey_par(const Key key) { uint32_t position = getPositionFromKey(key); gm_spinlock_acquire(locks + position); removeKeyAtPosition(position, key); gm_spinlock_release(locks + position); } void removeKey_seq(const Key key) { uint32_t position = getPositionFromKey(key); removeKeyAtPosition(position, key); } bool hasMaxValue_seq(const Key key) { return hasValue_generic_par(&gm_map<Key, Value>::compare_greater, key); } bool hasMinValue_seq(const Key key) { return hasValue_generic_par(&gm_map<Key, Value>::compare_smaller, key); } Key getMaxKey_seq() { return getKey_generic_seq(&gm_map<Key, Value>::compare_greater, gm_get_min<Value>()); } Key getMaxKey_par() { return getKey_generic_par(&gm_map<Key, Value>::compare_greater, gm_get_min<Value>()); } Key getMinKey_seq() { return getKey_generic_seq(&gm_map<Key, Value>::compare_smaller, gm_get_max<Value>()); } Key getMinKey_par() { return getKey_generic_par(&gm_map<Key, Value>::compare_smaller, gm_get_max<Value>()); } Value getMaxValue_seq() { return 
getValue_generic_seq(&gm_map<Key, Value>::compare_greater, &gm_map<Key, Value>::max, gm_get_min<Value>()); } Value getMaxValue_par() { return getValue_generic_par(&gm_map<Key, Value>::compare_greater, &gm_map<Key, Value>::max, gm_get_min<Value>()); } Value getMinValue_seq() { return getValue_generic_seq(&gm_map<Key, Value>::compare_smaller, &gm_map<Key, Value>::min, gm_get_max<Value>()); } Value getMinValue_par() { return getValue_generic_par(&gm_map<Key, Value>::compare_smaller, &gm_map<Key, Value>::min, gm_get_max<Value>()); } Value changeValueAtomicAdd(const Key key, const Value summand) { uint32_t position = getPositionFromKey(key); gm_spinlock_acquire(locks + position); Value newValue = summand; if(positionHasKey(position, key)) newValue += getValueFromPosition(position, key); setValueAtPosition(position, key, newValue); gm_spinlock_release(locks + position); return newValue; } size_t size() { size_t size = 0; #pragma omp parallel for reduction(+ : size) for(int i = 0; i < innerSize; i++) { size += innerMaps[i].size(); } return size; } void clear() { #pragma omp parallel for for(int i = 0; i < innerSize; i++) { innerMaps[i].clear(); } } }; #endif /* GM_MAP_H_ */
DRB066-pointernoaliasing-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* Freshly allocated pointers do not alias to each other. */

#include <stdio.h>  /* BUG FIX: printf was called without a declaration (stdio.h missing) */
#include <stdlib.h>

/*
 * Allocate two independent arrays of N doubles, initialize them in a
 * parallel loop, print the contents, and release the memory.
 *
 * The parallel loop is race-free: the two freshly malloc'ed blocks cannot
 * alias, so each iteration writes disjoint memory.
 */
void setup(int N)
{
  double *m_pdv_sum = malloc(sizeof *m_pdv_sum * N);
  double *m_nvol = malloc(sizeof *m_nvol * N);

  /* robustness: bail out cleanly if either allocation failed */
  if (m_pdv_sum == NULL || m_nvol == NULL) {
    free(m_pdv_sum);
    free(m_nvol);
    return;
  }

#pragma omp parallel for schedule(static)
  for (int i = 0; i < N; ++i) {
    m_pdv_sum[i] = 0.0;
    m_nvol[i] = i * 2.5;
  }

  for (int i = 0; i < N; ++i) {
    printf("%lf\n", m_pdv_sum[i]);
    printf("%lf\n", m_nvol[i]);
  }

  free(m_pdv_sum);
  free(m_nvol);
}

int main()
{
  int N = 1000;
  setup(N);
  return 0;
}
binbased_projection.h
// KRATOS __ __ _____ ____ _ _ ___ _ _ ____ // | \/ | ____/ ___|| | | |_ _| \ | |/ ___| // | |\/| | _| \___ \| |_| || || \| | | _ // | | | | |___ ___) | _ || || |\ | |_| | // |_| |_|_____|____/|_| |_|___|_| \_|\____| APPLICATION // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Antonia Larese De Tetto // #if !defined(KRATOS_BINBASED_PROJECTION ) #define KRATOS_BINBASED_PROJECTION //External includes // System includes #include <string> #include <iostream> #include <stdlib.h> // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "utilities/timer.h" #include "meshing_application.h" //Database includes #include "spatial_containers/spatial_containers.h" #include "utilities/binbased_fast_point_locator.h" #include "utilities/binbased_nodes_in_element_locator.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// This class allows the interpolation between non-matching meshes in 2D and 3D. /** @author Antonia Larese De Tetto <antoldt@cimne.upc.edu> * * This class allows the interpolation of a scalar or vectorial variable between non-matching meshes * in 2D and 3D. * * For every node of the destination model part it is checked in which element of the origin model part it is * contained and a linear interpolation is performed * * The data structure used by default is static bin. * In order to use this utility the construction of a bin of object @see BinBasedNodesInElementLocator * and a bin of nodes @see BinBasedFastPointLocator * is required at the beginning of the calculation (only ONCE). 
*/ //class BinBasedMeshTransfer template<std::size_t TDim > class BinBasedMeshTransfer { public: ///@name Type Definitions ///@{ /// Pointer definition of BinBasedMeshTransfer KRATOS_CLASS_POINTER_DEFINITION(BinBasedMeshTransfer<TDim >); /// Node type definition typedef Node<3> NodeType; typedef Geometry<NodeType> GeometryType; ///@} ///@name Life Cycle ///@{ /// Default constructor. BinBasedMeshTransfer() = default; // /// Destructor. virtual ~BinBasedMeshTransfer() = default; ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ //If you want to pass the whole model part //********************************************************************** //********************************************************************** /// Interpolate the whole problem type /** * @param rOrigin_ModelPart: the model part all the variable should be taken from * @param rDestination_ModelPart: the destination model part where we want to know the values of the variables */ void DirectInterpolation( ModelPart& rOrigin_ModelPart , ModelPart& rDestination_ModelPart ) { KRATOS_TRY KRATOS_ERROR << "Not implemented yet" << std::endl; KRATOS_CATCH("") } //If you want to pass only one variable //********************************************************************** //********************************************************************** /// Interpolate one variable from the fixed mesh to the moving one /** * @param rFixed_ModelPart: the model part all the variable should be taken from * @param rMoving_ModelPart: the destination model part where we want to know the values of the variables * @param rFixedDomainVariable: the name of the interpolated variable in the origin model part * @param rMovingDomainVariable: the name of the interpolated variable in the destination model part * @param node_locator: precomputed bin of objects. 
It is to be constructed separately @see binbased_fast_point_locator.h */ // Form fixed to moving model part template<class TDataType> void DirectVariableInterpolation( ModelPart& rFixed_ModelPart , ModelPart& rMoving_ModelPart, Variable<TDataType>& rFixedDomainVariable , Variable<TDataType>& rMovingDomainVariable, BinBasedFastPointLocator<TDim>& node_locator ) { KRATOS_TRY KRATOS_INFO("BinBasedMeshTransfer") << "Interpolate From Fixed Mesh*************************************" << std::endl; //creating an auxiliary list for the new nodes for(auto node_it = rMoving_ModelPart.NodesBegin(); node_it != rMoving_ModelPart.NodesEnd(); ++node_it) { ClearVariables(node_it, rMovingDomainVariable); } Vector N(TDim + 1); const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rMoving_ModelPart.Nodes().size(); #pragma omp parallel for firstprivate(results,N) for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator iparticle = rMoving_ModelPart.NodesBegin() + i; NodeType::Pointer pparticle = *(iparticle.base()); auto result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { //Interpolate( ElemIt, N, *it_found , rFixedDomainVariable , rMovingDomainVariable ); Interpolate( pelement, N, pparticle, rFixedDomainVariable , rMovingDomainVariable ); } } KRATOS_CATCH("") } /// Map one variable from the moving mesh to the fixed one -The two meshes should be of the same dimensions otherwise better to use /// MappingFromMovingMesh_VariableMeshes that is a much generic tool. 
/** * @param rFixed_ModelPart: the model part all the variable should be taken from * @param rMoving_ModelPart: the destination model part where we want to know the values of the variables * @param rFixedDomainVariable: the name of the interpolated variable in the origin model part * @param rMovingDomainVariable: the name of the interpolated variable in the destination model part * @param node_locator: precomputed bin of objects (elelments of the fixed mesh). It is to be constructed separately @see binbased_nodes_in_element_locator */ // From moving to fixed model part template<class TDataType> void MappingFromMovingMesh( ModelPart& rMoving_ModelPart , ModelPart& rFixed_ModelPart, Variable<TDataType>& rMovingDomainVariable , Variable<TDataType>& rFixedDomainVariable, BinBasedFastPointLocator<TDim>& node_locator //this is a bin of objects which contains the FIXED model part ) { KRATOS_TRY KRATOS_INFO("BinBasedMeshTransfer") << "Transfer From Moving Mesh*************************************" << std::endl; if (rMoving_ModelPart.NodesBegin()->SolutionStepsDataHas(rMovingDomainVariable) == false) KRATOS_THROW_ERROR(std::logic_error, "Add MovingDomain VARIABLE!!!!!! ERROR", ""); if (rFixed_ModelPart.NodesBegin()->SolutionStepsDataHas(rFixedDomainVariable) == false) KRATOS_THROW_ERROR(std::logic_error, "Add FixedDomain VARIABLE!!!!!! 
ERROR", ""); //creating an auxiliary list for the new nodes for(ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin(); node_it != rFixed_ModelPart.NodesEnd(); ++node_it) { ClearVariables(node_it, rFixedDomainVariable); } for (ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin(); node_it != rFixed_ModelPart.NodesEnd(); node_it++) { // if (node_it->IsFixed(VELOCITY_X) == false) // { // (node_it)->FastGetSolutionStepValue(VELOCITY) = ZeroVector(3); // (node_it)->FastGetSolutionStepValue(TEMPERATURE) = 0.0; (node_it)->GetValue(YOUNG_MODULUS) = 0.0; // } } //defintions for spatial search // typedef NodeType PointType; // typedef NodeType::Pointer PointTypePointer; Vector N(TDim + 1); const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rMoving_ModelPart.Nodes().size(); #pragma omp parallel for firstprivate(results,N) for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator iparticle = rMoving_ModelPart.NodesBegin() + i; NodeType::Pointer pparticle = *(iparticle.base()); auto result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { GeometryType& geom = pelement->GetGeometry(); // const array_1d<double, 3 > & vel_particle = (iparticle)->FastGetSolutionStepValue(VELOCITY); // const double& temperature_particle = (iparticle)->FastGetSolutionStepValue(TEMPERATURE); const TDataType& value = (iparticle)->FastGetSolutionStepValue(rMovingDomainVariable); for (std::size_t k = 0; k < geom.size(); k++) { geom[k].SetLock(); geom[k].FastGetSolutionStepValue(rFixedDomainVariable) += N[k] * value; geom[k].GetValue(YOUNG_MODULUS) += N[k]; geom[k].UnSetLock(); } } } for (ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin(); node_it != rFixed_ModelPart.NodesEnd(); 
node_it++) { const double NN = (node_it)->GetValue(YOUNG_MODULUS); if (NN != 0.0) { (node_it)->FastGetSolutionStepValue(rFixedDomainVariable) /= NN; } } KRATOS_CATCH("") } // From moving to fixed model part /// Interpolate one variable from the moving mesh to the fixed one /** * @param rFixed_ModelPart: the model part all the variable should be taken from * @param rMoving_ModelPart: the destination model part where we want to know the values of the variables * @param rFixedDomainVariable: the name of the interpolated variable in the origin model part * @param rMovingDomainVariable: the name of the interpolated variable in the destination model part * @param node_locator: precomputed bin of nodes of the fixed mesh. It is to be constructed separately @see binbased_nodes_in_element_locator */ template<class TDataType> void MappingFromMovingMesh_VariableMeshes( ModelPart& rMoving_ModelPart , ModelPart& rFixed_ModelPart, Variable<TDataType>& rMovingDomainVariable , Variable<TDataType>& rFixedDomainVariable, BinBasedNodesInElementLocator<TDim>& node_locator //this is a bin of objects which contains the FIXED model part ) { KRATOS_TRY KRATOS_WATCH("Transfer From Moving Mesh*************************************") if (rMoving_ModelPart.NodesBegin()->SolutionStepsDataHas(rMovingDomainVariable) == false) KRATOS_THROW_ERROR(std::logic_error, "Add MovingDomain VARIABLE!!!!!! ERROR", ""); if (rFixed_ModelPart.NodesBegin()->SolutionStepsDataHas(rFixedDomainVariable) == false) KRATOS_THROW_ERROR(std::logic_error, "Add FixedDomain VARIABLE!!!!!! 
ERROR", ""); //creating an auxiliary list for the new nodes for(ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin(); node_it != rFixed_ModelPart.NodesEnd(); ++node_it) { ClearVariables(node_it, rFixedDomainVariable); } //defintions for spatial search typedef typename BinBasedNodesInElementLocator<TDim>::PointVector PointVector; typedef typename BinBasedNodesInElementLocator<TDim>::DistanceVector DistanceVector; const std::size_t max_results = 5000; Matrix Nmat(max_results,TDim+1); boost::numeric::ublas::vector<int> positions(max_results); PointVector work_results(max_results); DistanceVector work_distances(max_results); Node<3> work_point(0,0.0,0.0,0.0); for(ModelPart::ElementsContainerType::iterator elem_it = rMoving_ModelPart.ElementsBegin(); elem_it != rMoving_ModelPart.ElementsEnd(); ++elem_it) { std::size_t nfound = node_locator.FindNodesInElement(*(elem_it.base()), positions, Nmat, max_results, work_results.begin(), work_distances.begin(), work_point); for(std::size_t k=0; k<nfound; k++) { auto it = work_results.begin() + positions[k]; array_1d<double,TDim+1> N = row(Nmat,k); Interpolate( *(elem_it.base()), N, *it, rMovingDomainVariable , rFixedDomainVariable); } } KRATOS_CATCH("") } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a stemplate<class T, std::size_t dim> tring. virtual std::string Info() const { return ""; } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const {} /// Print object's data. 
virtual void PrintData(std::ostream& rOStream) const {} ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member rVariables ///@{ ///@} ///@name Protected member rVariables ///@{ template<class T, std::size_t dim> ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member rVariables ///@{ ///@} ///@name Member rVariables ///@{ inline void CalculateCenterAndSearchRadius(GeometryType&geom, double& xc, double& yc, double& zc, double& R, array_1d<double,3>& N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double x2 = geom[2].X(); double y2 = geom[2].Y(); xc = 0.3333333333333333333*(x0+x1+x2); yc = 0.3333333333333333333*(y0+y1+y2); zc = 0.0; double R1 = (xc-x0)*(xc-x0) + (yc-y0)*(yc-y0); double R2 = (xc-x1)*(xc-x1) + (yc-y1)*(yc-y1); double R3 = (xc-x2)*(xc-x2) + (yc-y2)*(yc-y2); R = R1; if(R2 > R) R = R2; if(R3 > R) R = R3; R = 1.01 * sqrt(R); } //*************************************** //*************************************** inline void CalculateCenterAndSearchRadius(GeometryType&geom, double& xc, double& yc, double& zc, double& R, array_1d<double,4>& N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double z0 = geom[0].Z(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double z1 = geom[1].Z(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double z2 = geom[2].Z(); double x3 = geom[3].X(); double y3 = geom[3].Y(); double z3 = geom[3].Z(); xc = 0.25*(x0+x1+x2+x3); yc = 0.25*(y0+y1+y2+y3); zc = 0.25*(z0+z1+z2+z3); double R1 = (xc-x0)*(xc-x0) + (yc-y0)*(yc-y0) + (zc-z0)*(zc-z0); double R2 = (xc-x1)*(xc-x1) + (yc-y1)*(yc-y1) + (zc-z1)*(zc-z1); double R3 = (xc-x2)*(xc-x2) + (yc-y2)*(yc-y2) + (zc-z2)*(zc-z2); double R4 = (xc-x3)*(xc-x3) + (yc-y3)*(yc-y3) + (zc-z3)*(zc-z3); R = R1; if(R2 > R) R = R2; if(R3 
> R) R = R3; if(R4 > R) R = R4; R = sqrt(R); } //*************************************** //*************************************** inline double CalculateVol( const double x0, const double y0, const double x1, const double y1, const double x2, const double y2 ) { return 0.5*( (x1-x0)*(y2-y0)- (y1-y0)*(x2-x0) ); } //*************************************** //*************************************** inline double CalculateVol( const double x0, const double y0, const double z0, const double x1, const double y1, const double z1, const double x2, const double y2, const double z2, const double x3, const double y3, const double z3 ) { double x10 = x1 - x0; double y10 = y1 - y0; double z10 = z1 - z0; double x20 = x2 - x0; double y20 = y2 - y0; double z20 = z2 - z0; double x30 = x3 - x0; double y30 = y3 - y0; double z30 = z3 - z0; double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30; return detJ*0.1666666666666666666667; //return 0.5*( (x1-x0)*(y2-y0)- (y1-y0)*(x2-x0) ); } //*************************************** //*************************************** inline bool CalculatePosition( GeometryType&geom, const double xc, const double yc, const double zc, array_1d<double,3>& N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double area = CalculateVol(x0,y0,x1,y1,x2,y2); double inv_area = 0.0; if(area == 0.0) { // KRATOS_THROW_ERROR(std::logic_error,"element with zero area found",""); //The interpolated node will not be inside an elemente with zero area return false; } else { inv_area = 1.0 / area; } N[0] = CalculateVol(x1,y1,x2,y2,xc,yc) * inv_area; N[1] = CalculateVol(x2,y2,x0,y0,xc,yc) * inv_area; N[2] = CalculateVol(x0,y0,x1,y1,xc,yc) * inv_area; if(N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <=1.0 && N[1]<= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true return true; return 
false; } //*************************************** //*************************************** inline bool CalculatePosition( GeometryType&geom, const double xc, const double yc, const double zc, array_1d<double,4>& N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double z0 = geom[0].Z(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double z1 = geom[1].Z(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double z2 = geom[2].Z(); double x3 = geom[3].X(); double y3 = geom[3].Y(); double z3 = geom[3].Z(); double vol = CalculateVol(x0,y0,z0,x1,y1,z1,x2,y2,z2,x3,y3,z3); double inv_vol = 0.0; if(vol < 0.0000000000001) { // KRATOS_THROW_ERROR(std::logic_error,"element with zero vol found",""); //The interpolated node will not be inside an elemente with zero volume return false; // KRATOS_WATCH("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") } else { inv_vol = 1.0 / vol; } N[0] = CalculateVol(x1,y1,z1,x3,y3,z3,x2,y2,z2,xc,yc,zc) * inv_vol; N[1] = CalculateVol(x3,y3,z3,x0,y0,z0,x2,y2,z2,xc,yc,zc) * inv_vol; N[2] = CalculateVol(x3,y3,z3,x1,y1,z1,x0,y0,z0,xc,yc,zc) * inv_vol; N[3] = CalculateVol(x0,y0,z0,x1,y1,z1,x2,y2,z2,xc,yc,zc) * inv_vol; if(N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >=0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <=1.0) //if the xc yc zc is inside the tetrahedron return true return true; return false; } //ElemI Element iterator //N Shape functions //step_data_size //pnode pointer to the node //projecting total model part 2Dversion void Interpolate( Element::Pointer ElemIt, const Vector& N, int step_data_size, NodeType::Pointer pnode) { //Geometry element of the rOrigin_ModelPart GeometryType& geom = ElemIt->GetGeometry(); const std::size_t buffer_size = pnode->GetBufferSize(); const std::size_t vector_size = N.size(); for(std::size_t step = 0; step<buffer_size; step++) { //getting the data of the solution step double* step_data = (pnode)->SolutionStepData().Data(step); double* node0_data = 
geom[0].SolutionStepData().Data(step); //copying this data in the position of the vector we are interested in for(int j= 0; j< step_data_size; j++) { step_data[j] = N[0]*node0_data[j]; } for(std::size_t k= 1; k< vector_size; k++) { double* node1_data = geom[k].SolutionStepData().Data(step); for(int j= 0; j< step_data_size; j++) { step_data[j] += N[k]*node1_data[j]; } } } // pnode->GetValue(IS_VISITED) = 1.0; } //projecting an array1D 2Dversion void Interpolate( Element::Pointer ElemIt, const Vector& N, NodeType::Pointer pnode, Variable<array_1d<double,3> >& rOriginVariable, Variable<array_1d<double,3> >& rDestinationVariable) { //Geometry element of the rOrigin_ModelPart GeometryType& geom = ElemIt->GetGeometry(); const std::size_t buffer_size = pnode->GetBufferSize(); const std::size_t vector_size = N.size(); for(std::size_t step = 0; step<buffer_size; step++) { //getting the data of the solution step array_1d<double,3>& step_data = (pnode)->FastGetSolutionStepValue(rDestinationVariable , step); //Reference or no reference???//CANCELLA step_data = N[0] * geom[0].FastGetSolutionStepValue(rOriginVariable , step); // Copying this data in the position of the vector we are interested in for(std::size_t j= 1; j< vector_size; j++) { const array_1d<double,3>& node_data = geom[j].FastGetSolutionStepValue(rOriginVariable , step); step_data += N[j] * node_data; } } // pnode->GetValue(IS_VISITED) = 1.0; } //projecting a scalar 2Dversion void Interpolate( Element::Pointer ElemIt, const Vector& N, NodeType::Pointer pnode, Variable<double>& rOriginVariable, Variable<double>& rDestinationVariable) { //Geometry element of the rOrigin_ModelPart GeometryType& geom = ElemIt->GetGeometry(); const std::size_t buffer_size = pnode->GetBufferSize(); const std::size_t vector_size = N.size(); //facendo un loop sugli step temporali step_data come salva i dati al passo anteriore? Cioś dove passiamo l'informazione ai nodi??? 
for(std::size_t step = 0; step<buffer_size; step++) { //getting the data of the solution step double& step_data = (pnode)->FastGetSolutionStepValue(rDestinationVariable , step); //Reference or no reference???//CANCELLA //copying this data in the position of the vector we are interested in step_data = N[0] * geom[0].FastGetSolutionStepValue(rOriginVariable , step); // Copying this data in the position of the vector we are interested in for(std::size_t j= 1; j< vector_size; j++) { const double node_data = geom[j].FastGetSolutionStepValue(rOriginVariable , step); step_data += N[j] * node_data; } } // pnode->GetValue(IS_VISITED) = 1.0; } inline void Clear(ModelPart::NodesContainerType::iterator node_it, int step_data_size ) { std::size_t buffer_size = node_it->GetBufferSize(); for(std::size_t step = 0; step<buffer_size; step++) { //getting the data of the solution step double* step_data = (node_it)->SolutionStepData().Data(step); //copying this data in the position of the vector we are interested in for(int j= 0; j< step_data_size; j++) { step_data[j] = 0.0; } } } inline void ClearVariables(ModelPart::NodesContainerType::iterator node_it , Variable<array_1d<double,3> >& rVariable) { array_1d<double, 3>& Aux_var = node_it->FastGetSolutionStepValue(rVariable, 0); noalias(Aux_var) = ZeroVector(3); } inline void ClearVariables(ModelPart::NodesContainerType::iterator node_it, Variable<double>& rVariable) { double& Aux_var = node_it->FastGetSolutionStepValue(rVariable, 0); Aux_var = 0.0; } ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. 
BinBasedMeshTransfer& operator=(BinBasedMeshTransfer const& rOther); ///@} }; // Class BinBasedMeshTransfer ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// output stream function template<std::size_t TDim> inline std::ostream& operator << (std::ostream& rOStream, const BinBasedMeshTransfer<TDim>& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} } // namespace Kratos. #endif // KRATOS_BINBASED_PROJECTION defined
ssha512_fmt_plug.c
/*
 * ssha512 support for LDAP style password storage
 *
 * This software is Copyright (c) 2013 magnum, and it is hereby released to the
 * general public under the following terms: Redistribution and use in source
 * and binary forms, with or without modification, are permitted.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_saltedsha2;
#elif FMT_REGISTERS_H
john_register_one(&fmt_saltedsha2);
#else

#include "arch.h"
#include "misc.h"
#include "formats.h"
#include "options.h"
#include "johnswap.h"
#include "common.h"
#include "sha2.h"
#include "base64.h"
#include "simd-intrinsics.h"
#include <string.h>
#include "rawSHA512_common.h"

#ifdef _OPENMP
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif

#include "memdbg.h"

#define FORMAT_LABEL "SSHA512"
#define FORMAT_NAME "LDAP"

#ifdef SIMD_COEF_64
#define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "SHA512 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "SHA512 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif

#define PLAINTEXT_LENGTH (111-NSLDAP_SALT_LEN)
#define SALT_ALIGN 4

#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

/* Variable-length salt; data.w32 aliases the first 4 bytes for salt_hash(). */
struct s_salt {
	unsigned int len;
	union {
		unsigned char c[NSLDAP_SALT_LEN];
		ARCH_WORD_32 w32;
	} data;
};

static struct s_salt *saved_salt;

#ifdef SIMD_COEF_64
/* Maps byte i of candidate `index` to its position in the interleaved SIMD
 * key buffer: lanes are interleaved per 64-bit word, and (7-((i)&7)) flips
 * byte order within each word (big-endian word layout for SHA-512). */
#define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 )
static ARCH_WORD_64 (*saved_key)[SHA_BUF_SIZ*SIMD_COEF_64];
static ARCH_WORD_64 (*crypt_out)[8*SIMD_COEF_64];
/* Per-candidate shortcut to the length word (word 15) of its SIMD block. */
static ARCH_WORD_64 (**len_ptr64);
static int max_count;  /* set in init(); not read elsewhere in this file */
#else
static ARCH_WORD_32 (*crypt_out)[DIGEST_SIZE / 4];
static ARCH_WORD_64 (*saved_key)[PLAINTEXT_LENGTH + 1];
#endif
static int *saved_len;

/* Allocate per-candidate buffers; scale key count by OpenMP thread count.
 * In SIMD mode, precompute len_ptr64[i] pointers into the interleaved
 * key buffers so crypt_all() can store the bit length directly. */
static void init(struct fmt_main *self)
{
#ifdef SIMD_COEF_64
	unsigned int i, j;
#endif
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len));
#ifndef SIMD_COEF_64
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
#else
	len_ptr64 = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*len_ptr64), MEM_ALIGN_SIMD);
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt / SIMD_COEF_64, sizeof(*saved_key), MEM_ALIGN_SIMD);
	crypt_out = mem_calloc_align(self->params.max_keys_per_crypt / SIMD_COEF_64, sizeof(*crypt_out), MEM_ALIGN_SIMD);
	for (i = 0; i < self->params.max_keys_per_crypt; i += SIMD_COEF_64) {
		ARCH_WORD_64 *keybuffer = &((ARCH_WORD_64 *)saved_key)[(i&(SIMD_COEF_64-1)) + (i/SIMD_COEF_64)*SHA_BUF_SIZ*SIMD_COEF_64];
		for (j = 0; j < SIMD_COEF_64; ++j) {
			/* word 15 of the 16-word SHA-512 block holds the message length */
			len_ptr64[i+j] = &keybuffer[15*SIMD_COEF_64];
			++keybuffer;
		}
	}
	max_count = self->params.max_keys_per_crypt;
#endif
}

/* Release everything init() allocated. */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
#ifdef SIMD_COEF_64
	MEM_FREE(len_ptr64);
#endif
	MEM_FREE(saved_len);
}

#ifdef SIMD_COEF_64
/* Copy a candidate password into its interleaved SIMD buffer slot,
 * byte-swapping per 64-bit word, and record its length. The tail loop
 * re-cleans any leftover bytes from a previous, longer key. */
static void set_key(char *key, int index)
{
#if ARCH_ALLOWS_UNALIGNED
	const ARCH_WORD_64 *wkey = (ARCH_WORD_64*)key;
#else
	char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint64_t));
	const ARCH_WORD_64 *wkey = is_aligned(key, sizeof(uint64_t)) ?
			(ARCH_WORD_64*)key : (ARCH_WORD_64*)strcpy(buf_aligned, key);
#endif
	ARCH_WORD_64 *keybuffer = &((ARCH_WORD_64 *)saved_key)[(index&(SIMD_COEF_64-1)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64];
	ARCH_WORD_64 *keybuf_word = keybuffer;
	unsigned int len;
	ARCH_WORD_64 temp;

	len = 0;
	/* Read the key 8 bytes at a time; the masked tests locate the NUL
	 * terminator inside the current word (1..7 trailing bytes). */
	while((unsigned char)(temp = *wkey++)) {
		if (!(temp & 0xff00)) {
			*keybuf_word = JOHNSWAP64(temp & 0xff);
			len++;
			goto key_cleaning;
		}
		if (!(temp & 0xff0000)) {
			*keybuf_word = JOHNSWAP64(temp & 0xffff);
			len+=2;
			goto key_cleaning;
		}
		if (!(temp & 0xff000000)) {
			*keybuf_word = JOHNSWAP64(temp & 0xffffff);
			len+=3;
			goto key_cleaning;
		}
		if (!(temp & 0xff00000000ULL)) {
			*keybuf_word = JOHNSWAP64(temp & 0xffffffff);
			len+=4;
			goto key_cleaning;
		}
		if (!(temp & 0xff0000000000ULL)) {
			*keybuf_word = JOHNSWAP64(temp & 0xffffffffffULL);
			len+=5;
			goto key_cleaning;
		}
		if (!(temp & 0xff000000000000ULL)) {
			*keybuf_word = JOHNSWAP64(temp & 0xffffffffffffULL);
			len+=6;
			goto key_cleaning;
		}
		if (!(temp & 0xff00000000000000ULL)) {
			*keybuf_word = JOHNSWAP64(temp & 0xffffffffffffffULL);
			len+=7;
			goto key_cleaning;
		}
		*keybuf_word = JOHNSWAP64(temp);
		len += 8;
		keybuf_word += SIMD_COEF_64;
	}
key_cleaning:
	saved_len[index] = len;
	/* Zero stale words left over from a previous longer key in this slot */
	keybuf_word += SIMD_COEF_64;
	while(*keybuf_word && keybuf_word < &keybuffer[15*SIMD_COEF_64]) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_64;
	}
}
#else
/* Scalar path: store the key verbatim (including NUL) and its length. */
static void set_key(char *key, int index)
{
	int len = strlen(key);
	saved_len[index] = len;
	memcpy(saved_key[index], key, len + 1);
}
#endif

/* Decode the base64 blob after the {SSHA512} tag; the salt is whatever
 * follows the 64-byte digest. '=' padding reduces the decoded length. */
static void * get_salt(char * ciphertext)
{
	static struct s_salt cursalt;
	char *p;
	char realcipher[CIPHERTEXT_LENGTH];
	int len;

	ciphertext += NSLDAP_TAG_LENGTH;
	memset(realcipher, 0, sizeof(realcipher));
	memset(&cursalt, 0, sizeof(struct s_salt));
	len = strlen(ciphertext);
	base64_decode(ciphertext, len, realcipher);

	// We now support any salt length up to NSLDAP_SALT_SIZE
	cursalt.len = (len + 3) / 4 * 3 - DIGEST_SIZE;
	p = &ciphertext[len];
	while (*--p == '=')
		cursalt.len--;

	memcpy(cursalt.data.c, realcipher+DIGEST_SIZE, cursalt.len);
	return &cursalt;
}

#ifdef SIMD_COEF_64
/* Reassemble the plaintext from the interleaved SIMD buffer via GETPOS. */
static char *get_key(int index) {
	unsigned i;
	ARCH_WORD_64 s;
	static char out[PLAINTEXT_LENGTH + 1];
	unsigned char *wucp = (unsigned char*)saved_key;

	s = saved_len[index];
	for(i=0;i<(unsigned)s;i++)
		out[i] = wucp[ GETPOS(i, index) ];
	out[i] = 0;
	return (char*) out;
}
#else
static char *get_key(int index) {
	return (char*)saved_key[index];
}
#endif

/* Quick reject: compare only the first digest word of each candidate. */
static int cmp_all(void *binary, int count) {
	unsigned int index;
	for (index = 0; index < count; index++)
#ifdef SIMD_COEF_64
		if (((ARCH_WORD_64 *) binary)[0] == crypt_out[index/SIMD_COEF_64][index&(SIMD_COEF_64-1)])
#else
		if ( ((ARCH_WORD_32*)binary)[0] == crypt_out[index][0] )
#endif
			return 1;
	return 0;
}

/* Full digest comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_64
	int i;
	for (i = 0; i < DIGEST_SIZE/sizeof(ARCH_WORD_64); i++)
		if (((ARCH_WORD_64 *) binary)[i] != crypt_out[index/SIMD_COEF_64][(index&(SIMD_COEF_64-1))+i*SIMD_COEF_64])
			return 0;
	return 1;
#else
	return !memcmp(binary, crypt_out[index], DIGEST_SIZE);
#endif
}

/* cmp_one already compares the full digest, so nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

static void set_salt(void *salt) {
	saved_salt = salt;
}

/* Compute SHA-512(password . salt) for `count` candidates.
 * Scalar path uses the SHA2 library directly; the SIMD path appends the
 * salt, 0x80 padding and bit length into each interleaved buffer, then
 * hashes SIMD_COEF_64*PARA candidates per SIMDSHA512body() call. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index+=MAX_KEYS_PER_CRYPT) {
#ifndef SIMD_COEF_64
		SHA512_CTX ctx;
		SHA512_Init(&ctx);
		SHA512_Update(&ctx, saved_key[index], saved_len[index]);
		SHA512_Update(&ctx, saved_salt->data.c, saved_salt->len);
		SHA512_Final((unsigned char*)crypt_out[index], &ctx);
#else
		// We have to append salt (and re-clean buffer if it is dirty),
		// then append final length of password.salt
		int i, j;
		unsigned char *sk = (unsigned char*)saved_key;
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			int idx = i+index;
			int x = saved_len[idx];
			for (j = 0; j < saved_salt->len; ++j)
				sk[GETPOS(x+j,idx)] = saved_salt->data.c[j];
			x += j;
			sk[GETPOS(x,idx)] = 0x80;  /* SHA-512 padding marker */
			++x;
			/* zero any dirty bytes left from a previous longer salt/key */
			while (sk[GETPOS(x,idx)]) {
				sk[GETPOS(x,idx)] = 0;
				++x;
			}
			/* message length in bits, stored via the precomputed pointer */
			*(len_ptr64[idx]) = (saved_len[idx]+saved_salt->len)<<3;
		}
		SIMDSHA512body(&saved_key[index/SIMD_COEF_64], crypt_out[index/SIMD_COEF_64], NULL, SSEi_MIXED_IN);
#endif
	}
	return count;
}

/* Hash-table bucketing helpers: low bits of the first digest word. */
#ifdef SIMD_COEF_64
static int get_hash_0 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_0; }
static int get_hash_1 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_1; }
static int get_hash_2 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_2; }
static int get_hash_3 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_3; }
static int get_hash_4 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_4; }
static int get_hash_5 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_5; }
static int get_hash_6 (int index) { return crypt_out[(unsigned int)index/SIMD_COEF_64][index&(SIMD_COEF_64-1)] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
#endif

/* Bucket salts by their first 4 raw bytes (via the union alias). */
static int salt_hash(void *salt)
{
	struct s_salt * mysalt = salt;
	return mysalt->data.w32 & (SALT_HASH_SIZE - 1);
}

/* Format registration: parameters block, then the method table. */
struct fmt_main fmt_saltedsha2 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		NSLDAP_BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		DIGEST_SIZE,
		BINARY_ALIGN,
		NSLDAP_SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ NSLDAP_FORMAT_TAG },
		sha512_common_tests_ssha512
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		sha512_common_valid_nsldap,
		fmt_default_split,
		sha512_common_binary_nsldap,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
9232.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp parallel for simd schedule(static, 2) num_threads(2) for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp dist_schedule(static, #p11) for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
work_lhs.h
//--------------------------------------------------------------------- //--------------------------------------------------------------------- // // work_lhs.h // //--------------------------------------------------------------------- //--------------------------------------------------------------------- #ifndef __WORK_LHS_H #define __WORK_LHS_H #define fjac(m,n,i) fjac[(m-1)+5*((n-1)+5*(i+2))] #define njac(m,n,i) njac[(m-1)+5*((n-1)+5*(i+2))] #define lhsa(m,n,i) lhsa[(m-1)+5*((n-1)+5*(i+1))] #define lhsb(m,n,i) lhsb[(m-1)+5*((n-1)+5*(i+1))] #ifdef G_MAIN double fjac[5*5*(MAX_CELL_DIM+4)], njac[5*5*(MAX_CELL_DIM+4)], lhsa[5*5*(MAX_CELL_DIM+2)], lhsb[5*5*(MAX_CELL_DIM+2)], tmp1, tmp2, tmp3; // common /work_lhs/ fjac, njac, lhsa, lhsb, tmp1, tmp2, tmp3; #else extern double fjac[5*5*(MAX_CELL_DIM+4)], njac[5*5*(MAX_CELL_DIM+4)], lhsa[5*5*(MAX_CELL_DIM+2)], lhsb[5*5*(MAX_CELL_DIM+2)], tmp1, tmp2, tmp3; #endif /*G_MAIN*/ #ifdef _OPENMP #pragma omp threadprivate (fjac, njac, lhsa, lhsb, tmp1, tmp2, tmp3) #endif #endif
fusion_nb.c
#include <stdlib.h>

/*
 * 3x3 box filter: for every interior point (r, c) of the n-row,
 * m-column image `l`, write the sum of its 3x3 neighbourhood into
 * `output`, which holds (n-2) rows of (m-2) columns.
 * (The public prototype lives in fusion_nb.h; this definition does not
 * need the header to compile.)
 */
void fusion_nb(float *l, int m, int n, float *output)
{
    const int out_cols = m - 2;  /* width of one output row */

#pragma omp parallel for
    for (int r = 1; r < n - 1; r++) {
        for (int c = 1; c < m - 1; c++) {
            /* Sum each row of the window left-to-right, then combine
             * the rows top-to-bottom — same floating-point grouping as
             * the original tmp2..tmp17 cascade, so results are
             * bit-identical. */
            float above = l[m * (r - 1) + (c - 1)]
                        + l[m * (r - 1) + c]
                        + l[m * (r - 1) + (c + 1)];
            float mid   = l[m * r + (c - 1)]
                        + l[m * r + c]
                        + l[m * r + (c + 1)];
            float below = l[m * (r + 1) + (c - 1)]
                        + l[m * (r + 1) + c]
                        + l[m * (r + 1) + (c + 1)];

            output[out_cols * (r - 1) + (c - 1)] = above + mid + below;
        }
    }
}
task_mergeable.c
/* Increment x inside an OpenMP task (with the mergeable hint), wait
 * for the task to complete, then return the updated value.  Compiled
 * without OpenMP the pragmas are ignored and this is simply x + 1. */
int foo(int x)
{
#pragma omp task shared(x) mergeable
  {
    ++x;
  }
#pragma omp taskwait
  return x;
}
trans.c
/***************************\ * CUDA Accelerated Matrix * * Transpose Test benchmark * * * * by * * Elliott Forney * * 3.15.2010 * \***************************/ /* * Libraries */ #include <stdio.h> #include <stdlib.h> #include <stdio.h> #include <stdbool.h> #include <unistd.h> #include <getopt.h> #include <math.h> #include <cblas.h> #include "matrix.h" #include "benchmark.h" #include "errcheck.h" /* * Macros */ // print command line usage #define print_usage() fprintf(stdout, "Usage: %s [-m rows] [-n cols] [-s]\n", arg[0]) // acceptable maximum relative error tolerance #define TOLERANCE 0.01 #define DEBUG 0 /* * Global variables */ // flag for simple, one line output bool simple_out = false; /* * Function prototypes */ // parse command line arguments void parse_args(int narg, char **arg, unsigned *m, unsigned *n); void naive_trans(matrix a, matrix b); /* * Function bodies */ // setup network int main(int narg, char **arg) { // default matrix dimensions unsigned m = 6400; unsigned n = 0; // parse command line arguments parse_args(narg, arg, &m, &n); // if n or p == 0 then set to m if (n == 0) n = m; if (DEBUG > 0) printf("m: %d\nn: %d\n", m, n); // load test matrix a matrix a; matrix_init(&a, n, m); // load test matrix b matrix b; matrix_init(&b, m, n); matrix_load_runif(b, 0, 1); //matrix_load_testb(b); // figure a with naive method naive_trans(a, b); // create matrix to hold result matrix result; matrix_init(&result, n, m); matrix_load_runif(result, 0, 1); // run warm up matrix_trans(result, b); // wait for all kernels to finish matrix_wait(); // create a new benchmark timer benchmark ben; benchmark_init(&ben); // start timer benchmark_start_timer(&ben); // run multiplication matrix_trans(result, b); // wait for all kernels to finish matrix_wait(); // stop timer benchmark_stop_timer(&ben); if (DEBUG > 4) { printf("a:\n"); matrix_print_padded(a); printf("b:\n"); matrix_print_padded(b); printf("result:\n"); matrix_print_padded(result); } // figure giga floating 
point operatins per second benchmark_add_byte(&ben, 2*m*n*sizeof(float)); double gbytes = benchmark_check_gbytes(ben); double time = benchmark_check_timer(ben); // if simple output requested if (simple_out) { if (isinf(gbytes)) gbytes = 0.0f; // simply print time, gbytess and error //printf("%f %f\n", time*1e3f, gbytes); printf("%f\n", gbytes); } // if full output requested else { // float rmse = matrix_rmse(a, result); bool passed = true; if (rmse > TOLERANCE) passed = false; if (passed) printf("Test Passed!\n=======\n"); else printf("Test Failed!\n=======\n"); // print time and gbytess printf("Time: %f GByteS: %f RMSE: %f\n", time, gbytes, rmse); } // clean up matrix_dest(&a); matrix_dest(&b); matrix_dest(&result); benchmark_dest(&ben); // Come back soon now ya'hear! return 0; } // parse command line arguments void parse_args(int narg, char **arg, unsigned *m, unsigned *n) { int opt; // getopt output // for each argument while ((opt = getopt(narg, arg, "sm:n:")) != -1) { if (opt == 's') simple_out = true; else if (opt == 'm') *m = (unsigned)atoi(arg[optind-1]); else if (opt == 'n') *n = (unsigned)atoi(arg[optind-1]); // print usage and quit on unknown option else { print_usage(); exit(1); } } // assume last non dash arg is m if (optind < narg) *m = (unsigned)atoi(arg[optind]); } void naive_trans(matrix a, matrix b) { // check dimensions unsigned r, c; float *adata = a.cpu_data; float *bdata = b.cpu_data; const unsigned astride = a.cstride; const unsigned bstride = b.cstride; // #pragma omp parallel for private(r) for (c = 0; c < a.c; ++c) for (r = 0; r < a.r; ++r) adata[r*astride+c] = bdata[c*bstride+r]; }
jacobi_omp.c
/* * Copyright (c) 2008, BSC (Barcelon Supercomputing Center) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the <organization> nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY BSC ''AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <math.h> #include <time.h> #define NB 64 #define B 128 #define FALSE (0) #define TRUE (1) typedef double fp_type; typedef fp_type *vin; typedef fp_type *vout; typedef fp_type *bin; typedef fp_type *binout; fp_type *A[NB][NB]; fp_type *A_new[NB][NB]; fp_type *tmp[NB][NB]; void alloc_and_genmat() { int init_val, i, j, ii, jj; fp_type *p, *p_new; init_val = 1325; for (ii = 0; ii < NB; ii++) { for (jj = 0; jj < NB; jj++) { A[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type)); A_new[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type)); tmp[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type)); if (A[ii][jj] == NULL || A_new[ii][jj] == NULL || tmp[ii][jj] == NULL) { printf("Out of memory\n"); exit(1); } p = A[ii][jj]; p_new = A_new[ii][jj]; for (i = 0; i < B; i++) { for (j = 0; j < B; j++) { init_val = (3125 * init_val) % 65536; (*p) = (fp_type)((init_val - 32768.0) / 16384.0); (*p_new) = (*p); p++; p_new++; } } } } } long usecs(void) { struct timeval t; gettimeofday(&t, NULL); return t.tv_sec * 1000000 + t.tv_usec; } void clear(vout v) { int i, j, k; for (i = 0; i < B; i++) v[i] = (fp_type)0.0; } void getlastrow(bin A, vout v) { int j; for (j = 0; j < B; j++) v[j] = A[(B - 1) * B + j]; } void getlastcol(bin A, vout v) { int i; for (i = 0; i < B; i++) v[i] = A[i * B + B - 1]; } void getfirstrow(bin A, vout v) { int j; for (j = 0; j < B; j++) v[j] = A[0 * B + j]; } void getfirstcol(bin A, vout v) { int i; for (i = 0; i < B; i++) v[i] = A[i * B + 0]; } void jacobi(vin lefthalo, vin tophalo, vin righthalo, vin bottomhalo, bin A, binout A_new) { int i, j; fp_type tmp; fp_type left, top, right, bottom; for (i = 0; (i < B); i++) { for (j = 0; j < B; j++) { tmp = A[i * B + j]; left = (j == 0 ? lefthalo[j] : A[i * B + j - 1]); top = (i == 0 ? tophalo[i] : A[(i - 1) * B + j]); right = (j == B - 1 ? righthalo[i] : A[i * B + j + 1]); bottom = (i == B - 1 ? 
bottomhalo[i] : A[(i + 1) * B + j]); A_new[i * B + j] = 0.2 * (A[i * B + j] + left + top + right + bottom); } } } double maxdelta() { double dmax = -__DBL_MAX__; int ii, jj, i, j; #pragma omp parallel for schedule(static) reduction(max: dmax) for (ii = 0; ii < NB; ii++) { for (jj = 0; jj < NB; jj++) { for (i = 0; (i < B); i++) { for (j = 0; j < B; j++) { double diff = fabs(A_new[ii][jj][i * B + j] - A[ii][jj][i * B + j]); if(diff > dmax) dmax = diff; } } } } return dmax; } void compute(int niters) { int iters; int ii, jj; fp_type lefthalo[B], tophalo[B], righthalo[B], bottomhalo[B]; double delta = 2.0; double epsilon = 1e-7; iters = 0; // for (iters = 0; iters < niters; iters++) while(iters < niters) { ++iters; #pragma omp parallel \ private(ii, jj, lefthalo, tophalo, righthalo, bottomhalo) \ shared(A, A_new) { #pragma omp for schedule(static) for (ii = 0; ii < NB; ii++) { for (jj = 0; jj < NB; jj++) { if (ii > 0) getlastrow(A[ii - 1][jj], tophalo); else clear(tophalo); if (jj > 0) getlastcol(A[ii][jj - 1], lefthalo); else clear(lefthalo); if (ii < NB - 1) getfirstrow(A[ii + 1][jj], bottomhalo); else clear(bottomhalo); if (jj < NB - 1) getfirstcol(A[ii][jj + 1], righthalo); else clear(lefthalo); jacobi(lefthalo, tophalo, righthalo, bottomhalo, A[ii][jj], A_new[ii][jj]); } // jj } // ii } // end parallel delta = maxdelta(); printf("iteration %d: delta = %e\n", iters, delta); // yes, this is an inefficient copy // however, the library version requires you to do a copy in this way // on all of the component parts to avoid segmentation fault #pragma omp parallel for schedule(static) shared(A, A_new) for(int i = 0; i < NB; ++i) { for(int j = 0; j < NB; ++j) { for(int k = 0; k < B; ++k) for(int l = 0; l < B; ++l) A[i][j][k * B + l] = A_new[i][j][k * B + l]; } } } // iter } int main(int argc, char *argv[]) { int niters; // pp_time_t tm; // memset( &tm, 0, sizeof(tm) ); struct timespec start, end; if (argc > 1) { niters = atoi(argv[1]); } else niters = 1; 
alloc_and_genmat(); clock_gettime(CLOCK_MONOTONIC, &start); compute(niters); clock_gettime(CLOCK_MONOTONIC, &end); double time_taken = (end.tv_sec - start.tv_sec) * 1e9; time_taken = (time_taken + (end.tv_nsec - start.tv_nsec)) * 1e-9; printf("Running time = %g %s\n", time_taken, "s"); /* FILE *outFile; outFile = fopen("./jacobi_omp_values.txt", "w"); if (outFile == NULL) { fprintf(stderr, "Error writing to file\n"); } else { int ii, jj, i, j; for (ii = 0; ii < NB; ++ii) for (jj = 0; jj < NB; ++jj) for (i = 0; i < B; ++i) for (j = 0; j < B; ++j) fprintf(outFile, "%.15f\n", A[ii][jj][i * B + j]); fclose(outFile); } */ return 0; }
special_random_ops.h
// // @author raver119@gmail.com // #ifndef LIBND4J_SPECIAL_RANDOM_OPS_H #define LIBND4J_SPECIAL_RANDOM_OPS_H #include <ops/random_ops.h> namespace randomOps { ////////////////////////////////////////////////////////////////////// template<typename T> class Choice { public: method_idx method_X method_XY static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { /** * X holds data, * Y holds probabilities * Z will hold results */ // TODO: we probably might want to skip this sum, and state that probabilities array should be real probabilities, i.e. should sum to 1.0 //T probSum = extraArguments[0]; __shared__ Nd4jLong xLength; __shared__ Nd4jLong yLength; __shared__ Nd4jLong zLength; __shared__ int xEWS; __shared__ int yEWS; __shared__ int zEWS; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::random::RandomBuffer *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); xLength = shape::length(xShapeBuffer); yLength = shape::length(yShapeBuffer); zLength = shape::length(zShapeBuffer); xEWS = shape::elementWiseStride(xShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; if (zEWS >= 1 && xEWS >= 1 && yEWS >= 1) { for (Nd4jLong e = tid; e < zLength; e+=blockDim.x * gridDim.x) { T prob = buffer->relativeT<T>(e); T cumProb = (T) 0.0f; for (Nd4jLong f = 0; f < 
yLength; f++) { T relProb = y[f * yEWS]; cumProb += relProb; if (prob <= cumProb || f == yLength - 1) { z[e * zEWS] = x[f * xEWS]; f += yLength; } __syncthreads(); } __syncthreads(); } } else { Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; Nd4jLong zCoord[MAX_RANK]; __shared__ int xRank; __shared__ int yRank; __shared__ int zRank; __shared__ Nd4jLong *xShape; __shared__ Nd4jLong *yShape; __shared__ Nd4jLong *zShape; __shared__ Nd4jLong *xStride; __shared__ Nd4jLong *yStride; __shared__ Nd4jLong *zStride; if (threadIdx.x == 0) { xRank = shape::rank(xShapeBuffer); yRank = shape::rank(yShapeBuffer); zRank = shape::rank(zShapeBuffer); xShape = shape::shapeOf(xShapeBuffer); yShape = shape::shapeOf(yShapeBuffer); zShape = shape::shapeOf(zShapeBuffer); xStride = shape::stride(xShapeBuffer); yStride = shape::stride(yShapeBuffer); zStride = shape::stride(zShapeBuffer); } __syncthreads(); for (Nd4jLong i = tid; i < zLength; i+=blockDim.x * gridDim.x) { shape::ind2sub(zRank, zShape, i, zCoord); Nd4jLong zOffset2 = shape::getOffset(0, zShape, zStride, zCoord, zRank); T prob = buffer->relativeT<T>(i); T cumProb = (T) 0.0f; for (Nd4jLong f = 0; f < yLength; f++) { shape::ind2sub(yRank, yShape, i, yCoord); Nd4jLong yOffset2 = shape::getOffset(0, yShape, yStride, yCoord, yRank); T relProb = y[yOffset2]; cumProb += relProb; if (prob <= cumProb || f == yLength - 1) { shape::ind2sub(xRank, xShape, f, xCoord); Nd4jLong xOffset2 = shape::getOffset(0, xShape, xStride, xCoord, xRank); z[zOffset2] = x[xOffset2]; f += yLength; } __syncthreads(); } __syncthreads(); } } __syncthreads(); devBuffer->rewind(zLength); } #endif static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { /** * X holds data, * Y holds probabilities * Z will hold results */ nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); // TODO: we probably might want to skip this 
sum, and state that probabilities array should be real probabilities, i.e. should sum to 1.0 //T probSum = extraArguments[0]; Nd4jLong yLength = shape::length(yShapeBuffer); Nd4jLong zLength = shape::length(zShapeBuffer); int xEWS = shape::elementWiseStride(xShapeBuffer); int yEWS = shape::elementWiseStride(yShapeBuffer); int zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); if (zEWS >= 1 && xEWS >= 1 && yEWS >= 1) { #pragma omp parallel for num_threads(_threads) if (_threads > 1) schedule(guided) for (Nd4jLong e = 0; e < zLength; e++) { T prob = buffer->relativeT<T>(e); T cumProb = (T) 0.0f; for (Nd4jLong f = 0; f < yLength; f++) { T relProb = y[f * yEWS]; cumProb += relProb; if (prob <= cumProb || f == yLength - 1) { z[e * zEWS] = x[f * xEWS]; f += yLength; } } } } else { Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; Nd4jLong zCoord[MAX_RANK]; int xRank = shape::rank(xShapeBuffer); int yRank = shape::rank(yShapeBuffer); int zRank = shape::rank(zShapeBuffer); auto xShape = shape::shapeOf(xShapeBuffer); auto yShape = shape::shapeOf(yShapeBuffer); auto zShape = shape::shapeOf(zShapeBuffer); auto xStride = shape::stride(xShapeBuffer); auto yStride = shape::stride(yShapeBuffer); auto zStride = shape::stride(zShapeBuffer); #pragma omp parallel for num_threads(_threads) if (_threads > 1) schedule(guided) for (Nd4jLong i = 0; i < zLength; i++) { shape::ind2sub(zRank, zShape, i, zCoord); Nd4jLong zOffset2 = shape::getOffset(0, zShape, zStride, zCoord, zRank); T prob = buffer->relativeT<T>(i); T cumProb = (T) 0.0f; for (Nd4jLong f = 0; f < yLength; f++) { shape::ind2sub(yRank, yShape, i, yCoord); Nd4jLong yOffset2 = shape::getOffset(0, yShape, yStride, yCoord, yRank); T relProb = y[yOffset2]; cumProb += relProb; if (prob <= cumProb || f == yLength - 1) { shape::ind2sub(xRank, xShape, f, 
xCoord); Nd4jLong xOffset2 = shape::getOffset(0, xShape, xStride, xCoord, xRank); z[zOffset2] = x[xOffset2]; f += yLength; } } } } // update rng state buffer->rewindH(zLength); } }; ////////////////////////////////////////////////////////////////////// /** * This Op produces random values within specified boundaries. Distribuion is Gaussian */ template<typename T> class GaussianDistribution { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { __shared__ T epsilon; __shared__ T two_pi; __shared__ Nd4jLong zLength; __shared__ int zEWS; __shared__ int yEWS; __shared__ T mean; __shared__ T stddev; __shared__ int step; __shared__ T *tZ; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::random::RandomBuffer *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); tZ = (T *) (shmem + sizeof(nd4j::random::RandomBuffer)); zLength = shape::length(zShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); epsilon = (T) 1e-5; two_pi = (T) 2.0 * (T) 3.14159265358979323846; mean = extraArguments[0]; stddev = extraArguments[1]; step = (blockDim.x * gridDim.x); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong e = tid; e < zLength; e += step) { // we need to get random values tZ[threadIdx.x] = buffer->relativeT<T>(e, epsilon, (T) 1.0f); // fix for "next rng 
value" if (e + 1 >= zLength && e % 2 == 0) { tZ[threadIdx.x+1] = buffer->relativeT<T>(e+1, epsilon, (T) 1.0f); } T realMean = y == z ? mean : y[e * yEWS]; __syncthreads(); if (e % 2 == 0) z[e *zEWS] = (nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(tZ[threadIdx.x])) * nd4j::math::nd4j_cos<T>(two_pi * tZ[threadIdx.x+1])) * stddev + realMean; else z[e *zEWS] = (nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(tZ[threadIdx.x-1])) * nd4j::math::nd4j_sin<T>(two_pi * tZ[threadIdx.x])) * stddev + realMean; __syncthreads(); } __syncthreads(); devBuffer->rewind(zLength); } #endif static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { const T two_pi = (T) 2.0 * 3.14159265358979323846; auto zLength = shape::length(zShapeBuffer); auto yEWS = shape::elementWiseStride(yShapeBuffer); auto zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); int span = (zLength / _threads) + 8; // we're enforcing even chunks, since it's mandatory for this algorithm span -= span % 2; nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); T mean = extraArguments[0]; T stddev = extraArguments[1]; #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); Nd4jLong start = span * tid; Nd4jLong end = span * (tid + 1); if (end > zLength) end = zLength; T z0, z1; T u0, u1; T lnU0; bool generated = false; for (Nd4jLong e = start; e < end; e++) { if (!generated) { /* * Since box-muller transform expects non-zero u0 value, we'll just use rng with boundaries */ u0 = buffer->relativeT<T>(e, (T) 1e-5f, (T) 1.0f); u1 = buffer->relativeT<T>((e + 1), (T) 1e-5f, (T) 1.0f); lnU0 = nd4j::math::nd4j_sqrt<T>((T) -2.0f * 
nd4j::math::nd4j_log<T>(u0)); z0 = lnU0 * nd4j::math::nd4j_cos<T>(two_pi * u1); z1 = lnU0 * nd4j::math::nd4j_sin<T>(two_pi * u1); generated = true; T realMean = y == z ? mean : y[e * yEWS]; z[e * zEWS] = z0 * stddev + realMean; } else { T realMean = y == z ? mean : y[e * yEWS]; z[e * zEWS] = z1 * stddev + realMean; generated = false; } } } // update rng state buffer->rewindH(zLength); } }; ////////////////////////////////////////////////////////////////////// /** * This Op produces random values within [0..N], Distribuion is binomial */ template<typename T> class BinomialDistribution { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { int trials = (int) extraArguments[0]; T prob = extraArguments[1]; __shared__ Nd4jLong zLength; __shared__ int yEWS; __shared__ int zEWS; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::random::RandomBuffer *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); zLength = shape::length(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong e = tid; e < zLength; e += blockDim.x * gridDim.x) { int success = 0; for (int t = 1; t <= trials; t++) { T randVal = buffer->relativeT<T>((e+1) * t); if (y != z) { // we're using external probs prob = y[(t-1) * yEWS]; } if 
(randVal < prob) success++; } // we need this, to eliminate excessive code branching in runtime __syncthreads(); // if trials is set to 0, effectively we just have successful memset z[e * zEWS] = (T) success; } __syncthreads(); if (trials > 0) devBuffer->rewind(zLength * trials); } #endif static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { int trials = (int) extraArguments[0]; Nd4jLong zLength = shape::length(zShapeBuffer); auto yEWS = shape::elementWiseStride(yShapeBuffer); auto zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); int span = (zLength / _threads) + 8; nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); Nd4jLong start = span * tid; Nd4jLong end = span * (tid + 1); if (end > zLength) end = zLength; T prob = extraArguments[1]; for (Nd4jLong e = start; e < end; e++) { int success = 0; for (int t = 1; t <= trials; t++) { T randVal = buffer->relativeT<T>((e+1) * t); if (y != z) { // we're using external probs prob = y[(t-1) * yEWS]; } if (randVal < prob) success++; } // if trials is set to 0, effectively we just have successful memset z[e * zEWS] = (T) success; } } // update rng state if (trials > 0) buffer->rewindH(zLength * trials); } }; ////////////////////////////////////////////////////////////////////// /** * This Op produces random values within [0..N], Distribuion is binomial */ template<typename T> class BinomialDistributionEx { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong 
*xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { int trials = (int) extraArguments[0]; T prob = extraArguments[1]; __shared__ Nd4jLong zLength; __shared__ int yEWS; __shared__ int zEWS; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::random::RandomBuffer *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); zLength = shape::length(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong e = tid; e < zLength; e += blockDim.x * gridDim.x) { int success = 0; for (int t = 1; t <= trials; t++) { T randVal = buffer->relativeT<T>((e+1) * t); if (y != z) { // we're using external probs prob = y[e * yEWS]; } if (randVal < prob) success++; } // we need this, to eliminate excessive code branching in runtime __syncthreads(); // if trials is set to 0, effectively we just have successful memset z[e * zEWS] = (T) success; } __syncthreads(); if (trials > 0) devBuffer->rewind(zLength * trials); } #endif static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { int trials = (int) extraArguments[0]; Nd4jLong zLength = shape::length(zShapeBuffer); int yEWS = shape::elementWiseStride(yShapeBuffer); int zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = 
nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); int span = (zLength / _threads) + 8; nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); Nd4jLong start = span * tid; Nd4jLong end = span * (tid + 1); if (end > zLength) end = zLength; T prob = extraArguments[1]; for (Nd4jLong e = start; e < end; e++) { int success = 0; for (int t = 1; t <= trials; t++) { T randVal = buffer->relativeT<T>((e+1) * t); if (y != z) { // we're using external probs prob = y[e * yEWS]; } if (randVal < prob) success++; } // if trials is set to 0, effectively we just have successful memset z[e * zEWS] = (T) success; } } // update rng state if (trials > 0) buffer->rewindH(zLength * trials); } }; ////////////////////////////////////////////////////////////////////// // This Op produces random Gaussian values within [mean-2*stddev,mean+2*stddev] template<typename T> class TruncatedNormalDistribution { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { __shared__ T epsilon; __shared__ T two_pi; __shared__ Nd4jLong zLength; __shared__ int zEWS; __shared__ int yEWS; __shared__ T mean; __shared__ T stddev; __shared__ int step; __shared__ T *tZ; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::random::RandomBuffer *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); tZ = (T *) (shmem + sizeof(nd4j::random::RandomBuffer)); zLength = 
// ---------------------------------------------------------------------------
// NOTE(review): this chunk begins mid-way through a __device__ specialOpCuda
// body whose opening lines are above this view; everything up to the matching
// "#endif" below is the tail of that function, preserved verbatim.
// ---------------------------------------------------------------------------
                shape::length(zShapeBuffer);
                zEWS = shape::elementWiseStride(zShapeBuffer);
                yEWS = shape::elementWiseStride(yShapeBuffer);

                epsilon = (T) 1e-6f;                              // lower bound for uniforms (Box-Muller needs u0 > 0)
                two_pi = (T) 2.0 * (T) 3.14159265358979323846;    // 2*pi constant for Box-Muller

                mean = extraArguments[0];                         // distribution parameters arrive via extraArguments
                stddev = extraArguments[1];

                step = (blockDim.x * gridDim.x);                  // stride between elements handled by one thread
            }
            __syncthreads();

            // using this loop instead of memcpy
            // (copies the RNG state into shared memory, one byte per thread)
            for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
                cB[e] = dB[e];
            }
            __syncthreads();

            int tid = blockIdx.x * blockDim.x + threadIdx.x;

            // each iteration fills the pair (e, e + middle), so only the first half is iterated
            int middle = zLength % 2 == 0 ? zLength / 2 : zLength / 2 + 1;

            T result0, result1, u0, u1, z0, z1;

            // rejection bound: retry while |realMean| + |result| exceeds 2*|stddev|
            T ds = nd4j::math::nd4j_abs<T>(stddev) * (T) 2.0f;

            for (Nd4jLong e = tid; e < middle; e += step) {
                // we need to get random values
                Nd4jLong generation0 = 0;
                T realMean0 = y == z ? mean : y[e * yEWS];
                T realMean1 = y == z ? mean : y[(e + middle) * yEWS];
                do {
                    // NOTE(review): these declarations shadow the u0/u1 declared above,
                    // which therefore stay unused in this kernel.
                    T u0 = buffer->relativeT<T>(e + generation0, epsilon, (T) 1.0f);
                    T u1 = buffer->relativeT<T>(e + middle + generation0, epsilon, (T) 1.0f);

                    // Box-Muller: two uniforms -> two independent normal deviates
                    z0 = nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(u0)) * nd4j::math::nd4j_cos<T>(two_pi * u1);
                    z1 = nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(u0)) * nd4j::math::nd4j_sin<T>(two_pi * u1);

                    result0 = z0 * stddev + realMean0;
                    result1 = z1 * stddev + realMean1;

                    generation0 += zLength;   // advance the rng stream for the retry
                } while (nd4j::math::nd4j_abs<T>(realMean0) + nd4j::math::nd4j_abs<T>(result0) > ds || nd4j::math::nd4j_abs<T>(realMean1) + nd4j::math::nd4j_abs<T>(result1) > ds);

                z[e*zEWS] = result0;
                if((e+middle) < zLength)
                    z[(e + middle) * zEWS] = result1;
            }

            __syncthreads();
            devBuffer->rewind(zLength);
        }
#endif

        // CPU path: fills z with normal deviates via Box-Muller, using mean/stddev
        // from extraArguments; when y != z, y supplies a per-element mean.
        // Samples failing the 2*|stddev| rejection bound are re-drawn.
        static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
            const T two_pi = (T) 2.0 * 3.14159265358979323846;

            Nd4jLong zLength = shape::length(zShapeBuffer);
            auto yEWS = shape::elementWiseStride(yShapeBuffer);
            auto zEWS = shape::elementWiseStride(zShapeBuffer);

            // each loop iteration writes the pair (e, e + middle)
            auto middle = zLength % 2 == 0 ? zLength / 2 : zLength / 2 + 1;

            int elementsPerThread = middle / TAD_THRESHOLD;
            int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
            _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());

            int span = (middle / _threads) + 8;

            // we're enforcing even chunks, since it's mandatory for this algorithm
            span -= span % 2;

            nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);

            T mean = extraArguments[0];
            T stddev = extraArguments[1];

#pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread)
            {
                int tid = omp_get_thread_num();
                Nd4jLong start = span * tid;
                Nd4jLong end = span * (tid + 1);
                if (end > middle) {
                    end = middle;
                }

                T z0, z1;
                T u0, u1;
                T result0, result1, lnu0;

                // rejection bound, same as the CUDA path above
                T ds = nd4j::math::nd4j_abs<T>(stddev) * (T) 2.0f;

                for (Nd4jLong e = start; e < end; e++) {
                    /*
                     * Since box-muller transform expects non-zero u0 value, we'll just use rng with boundaries
                     */
                    Nd4jLong generation0 = 0;
                    T realMean0 = y == z ? mean : y[e * yEWS];
                    T realMean1 = y == z ? mean : y[(e + middle) * yEWS];
                    do {
                        u0 = buffer->relativeT<T>(e + generation0, (T) 1e-6f, (T) 1.0f);
                        u1 = buffer->relativeT<T>((e + middle + generation0), (T) 1e-6f, (T) 1.0f);

                        lnu0 = nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(u0));
                        z0 = lnu0 * nd4j::math::nd4j_cos<T>(two_pi * u1);
                        z1 = lnu0 * nd4j::math::nd4j_sin<T>(two_pi * u1);

                        result0 = z0 * stddev + realMean0;
                        result1 = z1 * stddev + realMean1;

                        generation0 += zLength;   // advance the rng stream for the retry
                    } while (nd4j::math::nd4j_abs<T>(realMean0) + nd4j::math::nd4j_abs<T>(result0) > ds || nd4j::math::nd4j_abs<T>(realMean1) + nd4j::math::nd4j_abs<T>(result1) > ds);

                    z[e*zEWS] = result0;
                    if((e+middle) < zLength)
                        z[(e + middle) * zEWS] = result1;
                }
            }

            // update rng state
            buffer->rewindH(zLength);
        }
    };

//////////////////////////////////////////////////////////////////////
    // This Op produces random Log-normal distribution
    template<typename T>
    class LogNormalDistribution {
    public:

        // boilerplate op-entry-point macros -- presumably expand to the standard
        // transform methods; defined in project headers outside this chunk.
        method_XY
        method_X
        method_idx

        static const bool requiresSpecial = true;

#ifdef __CUDACC__
        // CUDA path: z[e] = exp(normal(mean, stddev)); adjacent even/odd elements
        // share one Box-Muller pair staged through shared memory tZ.
        __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
            __shared__ T epsilon;
            __shared__ T two_pi;

            __shared__ Nd4jLong zLength;
            __shared__ int zEWS;
            __shared__ int yEWS;

            __shared__ T mean;
            __shared__ T stddev;
            __shared__ int step;

            __shared__ T *tZ;   // per-thread staging area for uniforms, after the RNG copy

            __shared__ nd4j::random::RandomBuffer *buffer;
            __shared__ unsigned char *cB;
            __shared__ unsigned char *dB;
            __shared__ nd4j::random::RandomBuffer *devBuffer;

            if (threadIdx.x == 0) {
                extern __shared__ unsigned char shmem[];
                buffer = (nd4j::random::RandomBuffer *) shmem;
                cB = shmem;
                devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);
                dB = reinterpret_cast<unsigned char *> (state);

                tZ = (T *) (shmem + sizeof(nd4j::random::RandomBuffer));

                zLength = shape::length(zShapeBuffer);
                zEWS = shape::elementWiseStride(zShapeBuffer);
                yEWS = shape::elementWiseStride(yShapeBuffer);

                epsilon = (T) 1e-5;
                two_pi = (T) 2.0 * (T) 3.14159265358979323846;

                mean = extraArguments[0];
                stddev = extraArguments[1];

                step = (blockDim.x * gridDim.x);
            }
            __syncthreads();

            // using this loop instead of memcpy
            for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) {
                cB[e] = dB[e];
            }
            __syncthreads();

            int tid = blockIdx.x * blockDim.x + threadIdx.x;

            for (Nd4jLong e = tid; e < zLength; e += step) {
                // we need to get random values
                tZ[threadIdx.x] = buffer->relativeT<T>(e, epsilon, (T) 1.0f);

                // fix for "next rng value"
                if (e + 1 >= zLength && e % 2 == 0) {
                    // NOTE(review): for threadIdx.x == blockDim.x - 1 this writes one
                    // slot past the block's tZ range -- assumes the shared-memory
                    // allocation includes padding; confirm at the launch site.
                    tZ[threadIdx.x+1] = buffer->relativeT<T>(e+1, epsilon, (T) 1.0f);
                }

                T realMean = y == z ? mean : y[e * yEWS];

                __syncthreads();

                // even element uses cos, odd element reuses its neighbour's u0 with sin
                if (e % 2 == 0)
                    z[e *zEWS] =  nd4j::math::nd4j_exp<T>((nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(tZ[threadIdx.x])) * nd4j::math::nd4j_cos<T>(two_pi * tZ[threadIdx.x+1])) * stddev + realMean);
                else
                    z[e *zEWS] =  nd4j::math::nd4j_exp<T>((nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(tZ[threadIdx.x-1])) * nd4j::math::nd4j_sin<T>(two_pi * tZ[threadIdx.x])) * stddev + realMean);
                __syncthreads();
            }

            __syncthreads();
            devBuffer->rewind(zLength);
        }
#endif

        // CPU path: z[e] = exp(normal(mean, stddev)); one Box-Muller draw
        // produces two consecutive outputs (z0 then z1 via the `generated` flag).
        static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) {
            const T two_pi = (T) 2.0 * 3.14159265358979323846;

            Nd4jLong zLength = shape::length(zShapeBuffer);
            auto yEWS = shape::elementWiseStride(yShapeBuffer);
            auto zEWS = shape::elementWiseStride(zShapeBuffer);

            int elementsPerThread = zLength / TAD_THRESHOLD;
            int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
            _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());

            int span = (zLength / _threads) + 8;

            // we're enforcing even chunks, since it's mandatory for this algorithm
            span -= span % 2;

            nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state);

            T mean = extraArguments[0];
            T stddev = extraArguments[1];

#pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread)
            {
                int tid = omp_get_thread_num();
                Nd4jLong start = span * tid;
                Nd4jLong end = span * (tid + 1);
                if (end > zLength)
                    end = zLength;

                T z0, z1;
                T u0, u1;
                T lnU0;

                bool generated = false;   // true while z1 from the last draw is still unconsumed

                for (Nd4jLong e = start; e < end; e++) {
                    if (!generated) {
                        /*
                         * Since box-muller transform expects non-zero u0 value, we'll just use rng with boundaries
                         */
                        u0 = buffer->relativeT<T>(e, (T) 1e-5f, (T) 1.0f);
                        u1 = buffer->relativeT<T>((e + 1), (T) 1e-5f, (T) 1.0f);

                        lnU0 = nd4j::math::nd4j_sqrt<T>((T) -2.0f * nd4j::math::nd4j_log<T>(u0));
                        z0 = lnU0 * nd4j::math::nd4j_cos<T>(two_pi * u1);
                        z1 = lnU0 * nd4j::math::nd4j_sin<T>(two_pi * u1);

                        generated = true;

                        T realMean = y == z ? mean : y[e * yEWS];
                        z[e * zEWS] = nd4j::math::nd4j_exp<T>(z0 * stddev + realMean);
                    } else {
                        T realMean = y == z ? mean : y[e * yEWS];
                        z[e * zEWS] = nd4j::math::nd4j_exp<T>(z1 * stddev + realMean);

                        generated = false;
                    }
                }
            }

            // update rng state
            buffer->rewindH(zLength);
        }
    };
}

#endif //LIBND4J_SPECIAL_RANDOM_OPS_H
task-taskwait-nested.c
/* * task-taskwait-nested.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run-race | FileCheck %s // RUN: %libarcher-compile-and-run-race-noserial | FileCheck %s // REQUIRES: tsan #include "ompt/ompt-signal.h" #include <omp.h> #include <stdio.h> #include <unistd.h> int main(int argc, char *argv[]) { int var = 0, a = 0; #pragma omp parallel num_threads(2) shared(var, a) #pragma omp master { #pragma omp task shared(var, a) { #pragma omp task shared(var, a) { // wait for master to pass the taskwait OMPT_SIGNAL(a); OMPT_WAIT(a, 2); var++; } } // Give other thread time to steal the task and execute its child. OMPT_WAIT(a, 1); // Only directly generated children are guaranteed to be executed. #pragma omp taskwait OMPT_SIGNAL(a); var++; } int error = (var != 2); fprintf(stderr, "DONE\n"); return error; } // CHECK: WARNING: ThreadSanitizer: data race // CHECK-NEXT: {{(Write|Read)}} of size 4 // CHECK-NEXT: #0 {{.*}}task-taskwait-nested.c:34 // CHECK: Previous write of size 4 // CHECK-NEXT: #0 {{.*}}task-taskwait-nested.c:44 // CHECK: DONE // CHECK: ThreadSanitizer: reported 1 warnings
game.h
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <time.h> #include <omp.h> #include "queue.h" #include "soundtracks.h" #include "colors.h" // A struct to keep track of the player score typedef struct score_tracking{ int *current_score; int highest_score; } st; void play(int difficulty, int num_of_colors, int reaction_time, int *score, int *score_multiplier, st record); // The gameplay resides here void input_and_check_answer(char input_answer[60], char answer[60], int *status,int round_flashes); // Check player's answer to the game's answer void game_difficulty(int difficulty, int *num_of_colors, int *reaction_time, int *score_multiplier); // Set up game based on the selected difficulty void color_flash(int round_flashes, int num_of_colors, int reaction_time); // Color flashing function to.., well.., flash colors void concatenate(char answer[60]); // Concatenate answers stored in queue to a string void display_score(int *score); // Display the player's current score void score_tracker(st record); // A score tracking function to.., well.., track the player's score void prompt(); // A prompt to the user to press the required key to continue int game(){ /* SYSTEM("COLOR XX") FORMAT: - FIRST DIGIT = BACKGROUND COLOR - SECOND DIGIT = FOREGROUND COLOR ex: system("color 10") = BLUE BACKGROUND AND BLACK FOREGROUND COLORS TO BE USED: - BLACK 0 - BLUE 1 - GREEN 2 - AQUA 3 - RED 4 - PURPLE 5 REACTION TIMES (in microseconds) AND COLORS: - EASY 900000 + BASE_REACTION_TIME & 3 - NORMAL 750000 + BASE_REACTION_TIME & 4 - HARD 550000 + BASE_REACTION_TIME & 4 - INSANE 400000 + BASE_REACTION_TIME & 5 */ srand(time(NULL)); int i, score = 0, highscore = 0, hs, score_multiplier, difficulty, num_of_colors, reaction_time; char highest_score_in_file[30]; FILE *fp = fopen("sorted.txt", "r"); if (fp == NULL) { fp = fopen("sorted.txt", "w+"); if (fp == NULL){ printf("Cannot open sorted.txt \n"); exit(0); } } fgets(highest_score_in_file, 30, fp); 
fclose(fp); hs = atoi(highest_score_in_file); // Store highest score in file in hs // Call menu to display the difficulties and ask the player to select it difficulty = menu(1) + 1; printf ("\n---------------------------------------------------------------------------------------------------\n"); system("cls"); #pragma omp parallel for schedule(static) reduction (+ : highscore) // Iterate each thread to sum highscore until the same as highscore for(i = 0; i < hs; i++){ highscore += 1; } struct score_tracking record; record.current_score = &score; record.highest_score = highscore; // Setting up game by the selected difficulty game_difficulty(difficulty, &num_of_colors, &reaction_time, &score_multiplier); // Commencing game play(difficulty, num_of_colors, reaction_time, &score, &score_multiplier, record); return score; } void game_difficulty(int difficulty, int *num_of_colors, int *reaction_time, int *score_multiplier){ // Number of colors, reaction time, and score multiplier vary depending on the difficulty switch(difficulty){ case 1: *num_of_colors = 3; *reaction_time = 620000; *score_multiplier = 20; break; case 2: *num_of_colors = 4; *reaction_time = 490000; *score_multiplier = 40; break; case 3: *num_of_colors = 4; *reaction_time = 340000; *score_multiplier = 70; break; case 4: *num_of_colors = 5; *reaction_time = 200000; *score_multiplier = 100; break; } } void color_flash(int round_flashes, int num_of_colors, int reaction_time){ int prev_color = -1, current_color = -1; do{ // Do not have a repeated color flashing while(prev_color == current_color) current_color = rand() % num_of_colors; prev_color = current_color; add_answer(current_color); // Add the current color to the answer queue // COLOR FLASH TIME !! 
switch(current_color){ case 0: // Blue blue_flash(reaction_time); break; case 1: // Green green_flash(reaction_time); break; case 2: // Aqua aqua_flash(reaction_time); break; case 3: // Red red_flash(reaction_time); break; case 4: // Purple purple_flash(reaction_time); break; } round_flashes--; }while(round_flashes != 0); } void play(int difficulty, int num_of_colors, int reaction_time, int *score, int *score_multiplier, st record){ int i, round_flashes = 3, round_counter = 1, status = 1; // <- Game's status (correct guess or wrong guess) char diff[6], answer[60], input_answer[60]; if(difficulty == 1) strcpy(diff, "EASY"); else if(difficulty == 2) strcpy(diff, "NORMAL"); else if(difficulty == 3) strcpy(diff, "HARD"); else if(difficulty == 4) strcpy(diff, "INSANE"); // State of play while(status == 1){ system("color 06"); // Black background yellow foreground display_score(score); if(round_counter % 6 == 0){ // Every 6 rounds, player gets a congrats message from the program. How nice! #pragma omp parallel { printf("\n\t\t CONGRATS ON PASSING LEVEL %d !!! 
- FROM THREAD %d OF %d", round_counter - 1, omp_get_thread_num(), omp_get_num_threads()); #pragma omp barrier #pragma omp critical printf("\n\t\t\t Keep up the good work !!\n"); } check_jingle(); } score_tracker(record); // Tell the player how they are doing on score printf("\t ------------------------------------------------------------\n"); printf("\n\t\t\t\t ** ROUND %d - %s **\n", round_counter, diff); printf("\t\t\t\t %d Flashes\n", round_flashes); printf("\n\t\t ------------------------------------------------------------"); prompt(); system("cls"); color_flash(round_flashes, num_of_colors, reaction_time); // Start flashing concatenate(answer); // Concatenate answer in queue and store it answer array input_and_check_answer(input_answer, answer, &status,round_flashes); // Make the player input their answer and check it #pragma omp single nowait { #pragma omp task *score += round_flashes * *score_multiplier * status; } #pragma omp taskwait round_counter++; round_flashes++; system("cls"); } } void input_and_check_answer(char input_answer[60], char answer[60], int *status,int round_flashes){ int i; char character_answer; // char to store each player answer input system("color 06"); printf("\t\t ------------------------------------------------------------"); // Iteration to assign character_answer to each input_answer element for(i = 0; i < round_flashes;i++){ character_answer = menu(2) + '0'; // Assign char by calling menu input_answer[i] = character_answer; // Assign character_answer to each input_answer element } printf("Your answer: %s\n", input_answer); display_answer(); getchar(); if (strcmp(input_answer, answer) == 0){ // If the player's answer is correct, then... printf("\n\n\t\t ------------------------------------------------------------"); printf("\n\t\t\t Correct! Get ready for the next round"); correct_jingle(); printf("\n\t\t ------------------------------------------------------------"); prompt(); }else{ // If the player's answer is wrong, then... 
printf("\n\n\t\t ------------------------------------------------------------"); printf ("\n\t\t\t Wrong!"); wrong_jingle(); printf ("\n\t\t\t Thank you for playing!"); printf("\n\t\t ------------------------------------------------------------"); prompt(); system("cls"); *status = 0; // Change status to 0 so the program stops playing } free_answer(); // Free the program's answer for the next add_answer() call } void concatenate(char answer[60]){ Node *var = rear; char s1[30], s2[30]; // Two arrays to store each answer and concatenate the latter to the former if(var != NULL){ sprintf(s1, "%d", var->answer); // Change int in var->answer to string to s1 while(var->next != NULL){ sprintf(s2, "%d", var->next->answer); // Change int in var->next->answer to string to s2 strcat(s1, s2); // Concatenate s2 to s1 strcpy(answer, s1); // Copy s1 to answer var = var->next; // Point to the next answer in queue } } } void prompt(){ printf("\n\t\t\t Press the ENTER key to continue..."); fflush(stdin); getchar(); } void display_score(int *score){ printf("Score: %d", *score); } void score_tracker(st record){ if(*(record.current_score) >= record.highest_score){ printf("\nCongratulations! You have achieved a new highscore.\n"); }else{ printf("\nYou are %d points away from the current highscore!\n", record.highest_score - *(record.current_score)); } }
openmp.c
/*
 OMP Implementation
 Parralelising recBitonicSort, impBitonicSort
*/
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <omp.h>
#include <sys/time.h> //for real time

#define MAX_THREADS 256
#define MIN_Q 12
#define MAX_Q 24
#define MIN_P 0
#define MAX_P 8

struct timeval startwtime, endwtime;
double seq_time;

// activeThreads counts threads claimed by the recursive sort; guarded by writelock
int activeThreads = 0,maxThreads;
omp_lock_t writelock;

int N; // data array size
int *a; // data array to be sorted

const int ASCENDING = 1;
const int DESCENDING = 0;

void init(void);
void print(void);
void sort(void);
void test(void);
inline void exchange(int i, int j);
inline void compare(int i, int j, int dir);
void bitonicMerge(int lo, int cnt, int dir);
void recBitonicSort(int lo, int cnt, int dir);
void impBitonicSort(void);
int cmpfunc_ascending (const void * a, const void * b);
int cmpfunc_descending (const void * a, const void * b);

// Benchmarks three sorts on the same random data: the imperative OpenMP
// bitonic sort, the recursive OpenMP bitonic sort, and plain qsort.
// argv[1] = p (2^p threads), argv[2] = q (2^q elements).
int main(int argc, char **argv) {
  int p,q;
  srand(time(NULL));
  if(argc !=3) {
    printf("How to use: %s p q\n where p is the 2^p no. of threads\n where 2^q is problem size (power of two)\n", argv[0]);
    exit(1);
  }

  p = atoi(argv[1]);
  maxThreads = 1 << p;
  // NOTE(review): message prints the p range while the check is on the derived
  // thread count; the two agree because 2^MAX_P == MAX_THREADS.
  if ( ( maxThreads < 1 ) || ( maxThreads > MAX_THREADS ) ) {
    printf("p should be between %d and %d.\n", MIN_P, MAX_P);
    exit(2);
  }

  q = atoi(argv[2]);
  // NOTE(review): the literals 12/24 duplicate MIN_Q/MAX_Q.
  if( ( q < 12 ) || ( q > 24 ) ){
    printf("q should be between %d and %d.\n", MIN_Q, MAX_Q);
    exit(3);
  }
  N = 1 << q;

  //creating array in main thread
  a = (int *) malloc(N * sizeof(int));

  //Imperative OMP sort
  init();
  printf("\nImperative OMP Implementation:\n");
  gettimeofday (&startwtime, NULL);
  impBitonicSort();
  gettimeofday (&endwtime, NULL);
  seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec);
  printf("Imperative OMP Wall clock time = %f\n", seq_time);
  test();

  //Recursive OMP Parallel sort
  omp_init_lock(&writelock);
  init();
  printf("\nRecursive OMP Parralel Implementation:\n");
  gettimeofday (&startwtime, NULL);
  sort();
  gettimeofday (&endwtime, NULL);
  omp_destroy_lock(&writelock);
  seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec);
  printf("Parallel OMP Recursive Wall clock time: %f\n", seq_time);
  test();

  //Qsort sort
  init();
  printf("\nqsort Implementation:\n");
  gettimeofday (&startwtime, NULL);
  qsort(a, N, sizeof(int), cmpfunc_ascending );
  gettimeofday (&endwtime, NULL);
  seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec);
  printf("qsort Wall clock time: %f\n", seq_time);
  test();

  return 0;
}

/** -------------- SUB-PROCEDURES ----------------- **/

/** procedure test() : verify sort results **/
// (checks ascending order only, which matches every benchmark above)
void test() {
  int pass = 1;
  int i;
  for (i = 1; i < N; i++) {
    pass &= (a[i-1] <= a[i]);
  }
  printf("TEST %s\n",(pass) ? "PASSed" : "FAILed");
}

/** procedure init() : initialize array "a" with data **/
void init() {
  int i;
  //resetting thread usage to zero for next implementation
  activeThreads = 0;
  for (i = 0; i < N; i++) {
    a[i] = rand() % N; // (N - i);
  }
}

/** procedure print() : print array elements **/
void print() {
  int i;
  for (i = 0; i < N; i++) {
    printf("%d\n", a[i]);
  }
  printf("\n");
}

/** INLINE procedure exchange() : pair swap **/
inline void exchange(int i, int j) {
  int t;
  t = a[i];
  a[i] = a[j];
  a[j] = t;
}

/** procedure compare()
   The parameter dir indicates the sorting direction, ASCENDING
   or DESCENDING; if (a[i] > a[j]) agrees with the direction,
   then a[i] and a[j] are interchanged.
**/
inline void compare(int i, int j, int dir) {
  if (dir==(a[i]>a[j]))
    exchange(i,j);
}

/** Procedure bitonicMerge()
   It recursively sorts a bitonic sequence in ascending order,
   if dir = ASCENDING, and in descending order otherwise.
   The sequence to be sorted starts at index position lo,
   the parameter cnt is the number of elements to be sorted.
**/
void bitonicMerge(int lo, int cnt, int dir) {
  if ( cnt > 1 ) {
    int k = cnt / 2 ;
    int i;
    for (i=lo; i<(lo+k); i++)
      compare(i, i+k, dir);
    bitonicMerge(lo,k,dir);
    bitonicMerge(lo+k,k,dir);
  }
}

/** function recBitonicSort()
    first produces a bitonic sequence by recursively sorting
    its two halves in opposite sorting orders, and then
    calls bitonicMerge to make them in the same order
**/
void recBitonicSort(int lo, int cnt, int dir) {
  if ( cnt > 1 ) {
    int k = cnt / 2;
    int parallel = 0;

    //if the subarrays are less than 100 elements long
    //we use q sort for faster sorting
    // NOTE(review): the guard below contradicts that comment -- it mixes a
    // length (k) with an index (lo) and fires for LARGE halves; and when it
    // fires, parallel stays 0 so the else-branch below qsorts the same two
    // halves a second time (redundant but still correct). Verify intent
    // before changing.
    if( (k-lo)>100 ){
      qsort(a+lo, k, sizeof(int), cmpfunc_ascending );
      qsort(a+lo+k, k, sizeof(int), cmpfunc_descending );
    }
    else if (activeThreads < maxThreads - 2) {
      omp_set_lock(&writelock);
      activeThreads += 2 ;
      omp_unset_lock(&writelock);

      //the remaining parallelism needs to run in the available threads
      omp_set_num_threads(maxThreads - activeThreads);
      #pragma omp parallel
      {
        #pragma omp sections
        {
          #pragma omp section
          recBitonicSort(lo, k, ASCENDING);
          #pragma omp section
          recBitonicSort(lo+k, k, DESCENDING);
        }
      }
      parallel = 1;
    }

    //Child threads will cease to exist once its completed
    //so we measure this descreasion
    if(parallel){
      omp_set_lock(&writelock);
      activeThreads-=2;
      omp_unset_lock(&writelock);
    }
    else{
      // ascending + descending qsort of the two halves yields a bitonic
      // sequence, which the merge below finishes sorting
      qsort(a + lo, k, sizeof(int), cmpfunc_ascending );
      qsort(a + lo + k, k, sizeof(int), cmpfunc_descending );
    }
    bitonicMerge(lo, cnt, dir);
  }
}

/** function sort()
   Caller of recBitonicSort for sorting the entire array of length N
   in ASCENDING order
**/
void sort() {
  recBitonicSort(0, N, ASCENDING);
}

/*
  imperative version of bitonic sort
*/
void impBitonicSort() {
  int i,j,k;

  omp_set_num_threads(maxThreads - activeThreads);
  // static chunk so each thread gets one contiguous slice per pass
  int chunk = (N/(maxThreads - activeThreads) );

  for (k=2; k<=N; k=2*k) {
    for (j=k>>1; j>0; j=j>>1) {
      #pragma omp parallel for schedule(static,chunk)
      for (i=0; i<N; i++) {
        int ij=i^j;
        if ((ij)>i) {
          if ((i&k)==0 && a[i] > a[ij])
            exchange(i,ij);
          if ((i&k)!=0 && a[i] < a[ij])
            exchange(i,ij);
        }
      }
    }
  }
}

/*for qsort*/
int cmpfunc_ascending (const void * a, const void * b) {
  return ( *(int*)a - *(int*)b );
}

int cmpfunc_descending (const void * a, const void * b) {
  return ( *(int*)b - *(int*)a );
}
//from: https://www.tutorialspoint.com/c_standard_library/c_function_qsort.htm
dnnl_utils_avx512.h
//===- dnnl_utils_avx512.h ------------------------------------------------===//
//
// Copyright (C) 2019-2020 Alibaba Group Holding Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// =============================================================================
#include <immintrin.h>
#include <omp.h>

namespace dnnl_utils {

// Builds an AVX-512 tail mask: one bit set per remaining element
// (len mod vec_size; a positive multiple yields all vec_size bits).
static int calculat_offset(int len, int vec_size) {
  /*
   calculate the offset when using intrinsics.
   example: when len is 108 and vec_size is 32 (bf16),
   the result is 108 % 32 = 12, so we need to set the mask to
   0b00000000000000000000111111111111
  */
  int offset = len;
  int expo = 0;
  int dst = 0;
  // reduce len to its remainder in (0, vec_size]
  while (offset - vec_size > 0) {
    offset -= vec_size;
  }
  // set the low `offset` bits
  // NOTE(review): pow() is from <math.h>/<cmath>, which is not included in
  // this chunk -- presumably pulled in by a header outside this view.
  while (offset > 0) {
    dst += pow(2, expo);
    offset -= 1;
    expo += 1;
  }
  return dst;
}

#if defined(__GNUC__) && (__GNUC__ > 9)
// Elementwise int32 binary op (add or mul) over len elements using AVX-512.
// NOTE(review): __mm512_binary_op stays uninitialized for algorithms other
// than binary_add/binary_mul (default: just break), so calling with any other
// alg is undefined behavior -- presumably callers only pass those two.
// NOTE(review): _mm512_mul_epi32 multiplies only the even 32-bit lanes into
// 64-bit products; an elementwise 32-bit multiply would be _mm512_mullo_epi32.
// Verify which semantics callers expect.
inline void binary_s32_func(dnnl::algorithm alg, int32_t* lhs, int32_t* rhs,
                            int32_t* dst, int len) {
  int i = 0;
  int vec_size = 512 / 32;   // 16 int32 lanes per __m512i
  __mmask16 mask16 = 0xFFFF;
  __m512i (*__mm512_binary_op)(__m512i, __m512i);
  switch (alg) {
    case dnnl::algorithm::binary_add:
      __mm512_binary_op = [](__m512i a, __m512i b) {
        return _mm512_add_epi32(a, b);
      };
      break;
    case dnnl::algorithm::binary_mul:
      __mm512_binary_op = [](__m512i a, __m512i b) {
        return _mm512_mul_epi32(a, b);
      };
      break;
    default:
      break;
  }
  // full-width body
  for (; i <= len - vec_size; i += vec_size) {
    auto a1 = _mm512_loadu_epi32(lhs + i);
    auto b1 = _mm512_loadu_epi32(rhs + i);
    auto out1 = __mm512_binary_op(a1, b1);
    _mm512_mask_storeu_epi32(dst + i, mask16, out1);
  }
  // masked tail
  if (len - i) {
    auto tail_mask = calculat_offset(len - i, vec_size);
    auto a1 = _mm512_maskz_loadu_epi32(tail_mask, lhs + i);
    auto b1 = _mm512_maskz_loadu_epi32(tail_mask, rhs + i);
    auto out1 = __mm512_binary_op(a1, b1);
    _mm512_mask_storeu_epi32(dst + i, tail_mask, out1);
  }
}
#else
// Fallback for compilers without the required AVX-512 intrinsic support.
inline void binary_s32_func(dnnl::algorithm alg, int32_t* lhs, int32_t* rhs,
                            int32_t* dst, int len) {
  assert(0);
}
#endif

// Masked load of up to 16 bf16 values, widened to f32 by shifting each
// 16-bit payload into the high half of its 32-bit lane.
inline __m512 _mm512_cvtbf16f32_load(__mmask16 mask, void* mem_addr) {
  auto dst = _mm512_slli_epi32(
      _mm512_cvtepu16_epi32(_mm256_maskz_loadu_epi16(mask, mem_addr)), 0x10);
  return _mm512_castsi512_ps(dst);
}

// Gather: for every batch b and index j, copies the inner_size*byte_size bytes
// of params[b][idx[j]] into dst[b][j].
// NOTE(review): idx values are not range-checked against axis_size.
inline void gather_func(char* params, int32_t* idx, size_t batch_size,
                        size_t idx_size, size_t axis_size, size_t inner_size,
                        size_t byte_size, char* dst) {
  size_t slice_bytes = inner_size * byte_size;
#pragma omp parallel for
  for (int i = 0; i < batch_size; i++) {
    for (int j = 0; j < idx_size; j++) {
      int32_t curr_idx = idx[j];
      memcpy(dst + (i * idx_size + j) * slice_bytes,
             params + (i * axis_size + curr_idx) * slice_bytes, slice_bytes);
    }
  }
}

#if defined(__GNUC__) && (__GNUC__ > 9)
// floor() over len bf16 values: widen to f32, floor, round back to bf16.
// Processes 32 values per iteration (two f32 vectors packed by cvtne2ps).
inline void floorbf_func(int len, int16_t* src, float* dst) {
  int i = 0;
  int vec_size = 512 / 16;   // 32 bf16 lanes per pass
  __mmask16 mask16 = 0xFFFF;
  auto alpha_vec = _mm512_set1_ps(0.0);   // NOTE(review): unused
  for (; i <= len - vec_size; i += vec_size) {
    auto a0 = _mm512_cvtbf16f32_load(mask16, src + i);
    auto a1 = _mm512_cvtbf16f32_load(mask16, src + i + 16);
    auto out0 = _mm512_floor_ps(a0);
    auto out1 = _mm512_floor_ps(a1);
    auto C_bf16 = _mm512_cvtne2ps_pbh(out1, out0);
    // dst is addressed in float units, hence the /2 for bf16 output
    _mm512_mask_storeu_ps(dst + i / 2, mask16, _mm512_castsi512_ps(C_bf16));
  }
  // half-vector (16-element) step
  if ((len - i) > 16) {
    auto a0 = _mm512_cvtbf16f32_load(mask16, src + i);
    auto out0 = _mm512_floor_ps(a0);
    auto C_bf16 = _mm512_cvtneps_pbh(out0);
    _mm256_storeu_ps(dst + i / 2, _mm256_castsi256_ps(C_bf16));
    i += vec_size / 2;
  }
  // masked tail
  if (len - i) {
    // NOTE(review): tail mask is computed from i, not from the remaining
    // count (len - i) as the other tails in this file do, and the store
    // offset is dst + i rather than dst + i/2 -- verify against callers.
    __mmask16 tail_mask = calculat_offset(i, vec_size);
    auto a0 = _mm512_cvtbf16f32_load(tail_mask, src + i);
    auto out0 = _mm512_floor_ps(a0);
    auto C_bf16 = _mm512_cvtneps_pbh(out0);
    _mm256_mask_storeu_ps(dst + i, tail_mask, _mm256_castsi256_ps(C_bf16));
  }
}
#elif defined(__GNUC__) && (__GNUC__ > 8)
// GCC 9 variant: 16 bf16 values per pass (single f32 vector, cvtneps only).
inline void floorbf_func(int len, int16_t* src, float* dst) {
  int i = 0;
  int vec_size = 512 / 32;   // 16 lanes per pass
  __mmask16 mask16 = 0xFFFF;
  auto alpha_vec = _mm512_set1_ps(0.0);   // NOTE(review): unused
  auto tail_mask = calculat_offset(len, vec_size);
  for (; i <= len - vec_size; i += vec_size) {
    auto a0 = _mm512_cvtbf16f32_load(mask16, src + i);
    auto out0 = _mm512_floor_ps(a0);
    auto C_bf16 = _mm512_cvtneps_pbh(out0);
    _mm256_storeu_ps(dst + i, _mm256_castsi256_ps(C_bf16));
  }
  if (len - i) {
    auto a0 = _mm512_cvtbf16f32_load(tail_mask, src + i);
    auto out0 = _mm512_floor_ps(a0);
    auto C_bf16 = _mm512_cvtneps_pbh(out0);
    _mm256_mask_storeu_ps(dst + i, tail_mask, _mm256_castsi256_ps(C_bf16));
  }
}
#else
// Fallback for compilers without bf16 intrinsic support.
inline void floorbf_func(int len, int16_t* src, float* dst) { assert(0); }
#endif

// floor() over len f32 values, 16 per pass with a masked tail.
inline void floorf_func(int len, float* src, float* dst) {
  int i = 0;
  int vec_size = 512 / 32;
  __mmask16 mask16 = 0xFFFF;
  for (; i <= len - vec_size; i += vec_size) {
    auto a1 = _mm512_loadu_ps(src + i);
    auto out1 = _mm512_floor_ps(a1);
    _mm512_mask_storeu_ps(dst + i, mask16, out1);
  }
  if (len - i) {
    auto tail_mask = calculat_offset(len - i, vec_size);
    auto a1 = _mm512_maskz_loadu_ps(tail_mask, src + i);
    auto out1 = _mm512_floor_ps(a1);
    _mm512_mask_storeu_ps(dst + i, tail_mask, out1);
  }
}

// Approximate reciprocal square root (rsqrt14, <=2^-14 rel. error) over len
// f32 values, 16 per pass with a masked tail.
inline void rsqrtf_func(int len, float* src, float* dst) {
  int i = 0;
  int vec_size = 512 / 32;
  __mmask16 mask16 = 0xFFFF;
  for (; i <= len - vec_size; i += vec_size) {
    auto a1 = _mm512_loadu_ps(src + i);
    auto out1 = _mm512_rsqrt14_ps(a1);
    _mm512_mask_storeu_ps(dst + i, mask16, out1);
  }
  if (len - i) {
    auto tail_mask = calculat_offset(len - i, vec_size);
    auto a1 = _mm512_maskz_loadu_ps(tail_mask, src + i);
    auto out1 = _mm512_rsqrt14_ps(a1);
    _mm512_mask_storeu_ps(dst + i, tail_mask, out1);
  }
}

#if defined(__GNUC__) && (__GNUC__ > 9)
// NOTE(review): this chunk ends mid-definition; the remainder of rsqrtbf_func
// lies outside this view and is preserved as-is.
inline void rsqrtbf_func(int len, int16_t* src, float* dst) { int i =
0; int vec_size = 512 / 16; __mmask16 mask16 = 0xFFFF; auto alpha_vec = _mm512_set1_ps(0.0); for (; i <= len - vec_size; i += vec_size) { auto a0 = _mm512_cvtbf16f32_load(mask16, src + i); auto a1 = _mm512_cvtbf16f32_load(mask16, src + i + 16); auto out0 = _mm512_rsqrt14_ps(a0); auto out1 = _mm512_rsqrt14_ps(a1); auto C_bf16 = _mm512_cvtne2ps_pbh(out1, out0); _mm512_mask_storeu_ps(dst + i / 2, mask16, _mm512_castsi512_ps(C_bf16)); } if ((len - i) > 16) { auto a0 = _mm512_cvtbf16f32_load(mask16, src + i); auto out0 = _mm512_rsqrt14_ps(a0); auto C_bf16 = _mm512_cvtneps_pbh(out0); _mm256_storeu_ps(dst + i / 2, _mm256_castsi256_ps(C_bf16)); i += 16; } if (len - i) { auto tail_mask = calculat_offset(len - i, vec_size); auto a0 = _mm512_cvtbf16f32_load(tail_mask, src + i); auto out0 = _mm512_rsqrt14_ps(a0); auto C_bf16 = _mm512_cvtneps_pbh(out0); _mm256_mask_storeu_ps(dst + i, tail_mask, _mm256_castsi256_ps(C_bf16)); } } #elif defined(__GNUC__) && (__GNUC__ > 8) inline void rsqrtbf_func(int len, int16_t* src, float* dst) { int i = 0; int vec_size = 512 / 32; __mmask16 mask16 = 0xFFFF; auto alpha_vec = _mm512_set1_ps(0.0); auto tail_mask = calculat_offset(len, vec_size); for (; i <= len - vec_size; i += vec_size) { auto a0 = _mm512_cvtbf16f32_load(mask16, src + i); auto out0 = _mm512_rsqrt14_ps(a0); auto C_bf16 = _mm512_cvtneps_pbh(out0); _mm256_storeu_ps(dst + i, _mm256_castsi256_ps(C_bf16)); } if (len - i) { auto a0 = _mm512_cvtbf16f32_load(tail_mask, src + i); auto out0 = _mm512_rsqrt14_ps(a0); auto C_bf16 = _mm512_cvtneps_pbh(out0); _mm256_mask_storeu_ps(dst + i, tail_mask, _mm256_castsi256_ps(C_bf16)); } } #else inline void rsqrtbf_func(int len, int16_t* src, float* dst) {} #endif #if defined(__GNUC__) && (__GNUC__ > 9) static inline __m512 pexp(const __m512& _x) { __m512 p16f_1 = _mm512_set1_ps(1.0f); __m512 p16f_half = _mm512_set1_ps(0.5f); __m512 p16f_127 = _mm512_set1_ps(127.f); __m512 p16f_exp_hi = _mm512_set1_ps(88.3762626647950f); __m512 p16f_exp_lo = 
_mm512_set1_ps(-88.3762626647949f); __m512 p16f_cephes_LOG2EF = _mm512_set1_ps(1.44269504088896341f); __m512 p16f_cephes_exp_p0 = _mm512_set1_ps(1.9875691500E-4f); __m512 p16f_cephes_exp_p1 = _mm512_set1_ps(1.3981999507E-3f); __m512 p16f_cephes_exp_p2 = _mm512_set1_ps(8.3334519073E-3f); __m512 p16f_cephes_exp_p3 = _mm512_set1_ps(4.1665795894E-2f); __m512 p16f_cephes_exp_p4 = _mm512_set1_ps(1.6666665459E-1f); __m512 p16f_cephes_exp_p5 = _mm512_set1_ps(5.0000001201E-1f); // Clamp x. __m512 x = _mm512_max_ps(_mm512_min_ps(_x, p16f_exp_hi), p16f_exp_lo); // Express exp(x) as exp(m*ln(2) + r), start by extracting // m = floor(x/ln(2) + 0.5). __m512 m = _mm512_floor_ps(_mm512_fmadd_ps(x, p16f_cephes_LOG2EF, p16f_half)); // Get r = x - m*ln(2). If no FMA instructions are available, m*ln(2) is // subtracted out in two parts, m*C1+m*C2 = m*ln(2), to avoid accumulating // truncation errors. Note that we don't use the "pmadd" function here to // ensure that a precision-preserving FMA instruction is used. __m512 p16f_nln2 = _mm512_set1_ps(-0.6931471805599453f); __m512 r = _mm512_fmadd_ps(m, p16f_nln2, x); __m512 r2 = _mm512_mul_ps(r, r); // TODO(gonnet): Split into odd/even polynomials and try to exploit // instruction-level parallelism. __m512 y = p16f_cephes_exp_p0; y = _mm512_fmadd_ps(y, r, p16f_cephes_exp_p1); y = _mm512_fmadd_ps(y, r, p16f_cephes_exp_p2); y = _mm512_fmadd_ps(y, r, p16f_cephes_exp_p3); y = _mm512_fmadd_ps(y, r, p16f_cephes_exp_p4); y = _mm512_fmadd_ps(y, r, p16f_cephes_exp_p5); y = _mm512_fmadd_ps(y, r2, r); y = _mm512_add_ps(y, p16f_1); // Build emm0 = 2^m. __m512i emm0 = _mm512_cvttps_epi32(_mm512_add_ps(m, p16f_127)); emm0 = _mm512_slli_epi32(emm0, 23); // Return 2^m * exp(r). 
return _mm512_max_ps(_mm512_mul_ps(y, _mm512_castsi512_ps(emm0)), _x); }; static inline __m512 erf_avx512(const __m512& src512) { const __m512 coeff0 = _mm512_set1_ps(+7.853861353153693E-5); const __m512 coeff1 = _mm512_set1_ps(-8.010193625184903E-4); const __m512 coeff2 = _mm512_set1_ps(+5.188327685732524E-3); const __m512 coeff3 = _mm512_set1_ps(-2.685381193529856E-2); const __m512 coeff4 = _mm512_set1_ps(+1.128358514861418E-1); const __m512 coeff5 = _mm512_set1_ps(-3.761262582423300E-1); const __m512 coeff6 = _mm512_set1_ps(+1.128379165726710E+0); __m512 dst512; __m512 base512 = _mm512_mul_ps(src512, src512); dst512 = _mm512_fmadd_ps(coeff0, base512, coeff1); dst512 = _mm512_fmadd_ps(dst512, base512, coeff2); dst512 = _mm512_fmadd_ps(dst512, base512, coeff3); dst512 = _mm512_fmadd_ps(dst512, base512, coeff4); dst512 = _mm512_fmadd_ps(dst512, base512, coeff5); dst512 = _mm512_fmadd_ps(dst512, base512, coeff6); dst512 = _mm512_mul_ps(dst512, src512); return dst512; } static inline __m512 erfc_avx512(const __m512& src512) { const __m512 Pcoeff0 = _mm512_set1_ps(+2.326819970068386E-2); const __m512 Pcoeff1 = _mm512_set1_ps(-1.387039388740657E-1); const __m512 Pcoeff2 = _mm512_set1_ps(+3.687424674597105E-1); const __m512 Pcoeff3 = _mm512_set1_ps(-5.824733027278666E-1); const __m512 Pcoeff4 = _mm512_set1_ps(+6.210004621745983E-1); const __m512 Pcoeff5 = _mm512_set1_ps(-4.944515323274145E-1); const __m512 Pcoeff6 = _mm512_set1_ps(+3.404879937665872E-1); const __m512 Pcoeff7 = _mm512_set1_ps(-2.741127028184656E-1); const __m512 Pcoeff8 = _mm512_set1_ps(+5.638259427386472E-1); const __m512 Rcoeff0 = _mm512_set1_ps(-1.047766399936249E+1); const __m512 Rcoeff1 = _mm512_set1_ps(+1.297719955372516E+1); const __m512 Rcoeff2 = _mm512_set1_ps(-7.495518717768503E+0); const __m512 Rcoeff3 = _mm512_set1_ps(+2.921019019210786E+0); const __m512 Rcoeff4 = _mm512_set1_ps(-1.015265279202700E+0); const __m512 Rcoeff5 = _mm512_set1_ps(+4.218463358204948E-1); const __m512 Rcoeff6 = 
_mm512_set1_ps(-2.820767439740514E-1); const __m512 Rcoeff7 = _mm512_set1_ps(+5.641895067754075E-1); const __m512 one = _mm512_set1_ps(1.0); const __m512 two = _mm512_set1_ps(2.0); const __m512 zero = _mm512_set1_ps(0.0); const __m512 MinorMaxlog = _mm512_set1_ps(-88.72283905206835); __m512 abssrc = _mm512_abs_ps(src512); __m512 nabssrc = _mm512_sub_ps(zero, abssrc); __m512 v = _mm512_mul_ps(abssrc, nabssrc); __m512 z = pexp(v); __m512 q = _mm512_div_ps(one, abssrc); __m512 y = _mm512_mul_ps(q, q); __mmask16 PCoeff_mask = _mm512_cmp_ps_mask(abssrc, two, _CMP_LT_OQ); // < 2 __mmask16 RCoeff_mask = ~PCoeff_mask; __m512 pP; __m512 pR; if (PCoeff_mask) { pP = _mm512_fmadd_ps(Pcoeff0, y, Pcoeff1); pP = _mm512_fmadd_ps(pP, y, Pcoeff2); pP = _mm512_fmadd_ps(pP, y, Pcoeff3); pP = _mm512_fmadd_ps(pP, y, Pcoeff4); pP = _mm512_fmadd_ps(pP, y, Pcoeff5); pP = _mm512_fmadd_ps(pP, y, Pcoeff6); pP = _mm512_fmadd_ps(pP, y, Pcoeff7); pP = _mm512_fmadd_ps(pP, y, Pcoeff8); } if (RCoeff_mask) { pR = _mm512_fmadd_ps(Rcoeff0, y, Rcoeff1); pR = _mm512_fmadd_ps(pR, y, Rcoeff2); pR = _mm512_fmadd_ps(pR, y, Rcoeff3); pR = _mm512_fmadd_ps(pR, y, Rcoeff4); pR = _mm512_fmadd_ps(pR, y, Rcoeff5); pR = _mm512_fmadd_ps(pR, y, Rcoeff6); pR = _mm512_fmadd_ps(pR, y, Rcoeff7); } pP = _mm512_mask_mov_ps(pP, RCoeff_mask, pR); // y = z * q * p; // float y_clamp = z < -kMaxlog ? 0 : y; // return x < 0 ? 
2 - y_clamp : y_clamp; y = _mm512_mul_ps(z, q); y = _mm512_mul_ps(y, pP); __mmask16 y_clamp_mask = _mm512_cmp_ps_mask(z, MinorMaxlog, _CMP_LT_OQ); __m512 y_clamp = _mm512_mask_mov_ps(y, y_clamp_mask, zero); __mmask16 x_mask = _mm512_cmp_ps_mask(src512, zero, _CMP_LT_OQ); __m512 y_clamp2 = _mm512_sub_ps(two, y_clamp); y = _mm512_mask_mov_ps(y_clamp, x_mask, y_clamp2); y = _mm512_sub_ps(one, y); return y; } #endif template <typename T, typename Q> inline void splitter(const T& n, const Q& team, const Q& tid, T& n_start, T& n_end) { if (team <= 1 || n == 0) { n_start = 0; n_end = n; } else { T n1 = (n + (T)team - 1) / (T)team; T n2 = n1 - 1; T T1 = n - n2 * (T)team; n_end = (T)tid < T1 ? n1 : n2; n_start = (T)tid <= T1 ? tid * n1 : T1 * n1 + ((T)tid - T1) * n2; } n_end += n_start; } template <typename T0, typename F> void for_1d(const int& ithr, const int& nthr, const T0& D0, const F& func) { T0 d0{0}, end{0}; splitter(D0, nthr, ithr, d0, end); for (; d0 < end; ++d0) func(d0); } template <typename T0, typename F> void parallel_for(const T0& D0, const F& func) { #pragma omp parallel for_1d(omp_get_thread_num(), omp_get_num_threads(), D0, func); } inline bool parallel_it_step() { return true; } template <typename Q, typename R, typename... Args> inline bool parallel_it_step(Q& x, const R& X, Args&&... tuple) { if (parallel_it_step(static_cast<Args>(tuple)...)) { x = (x + 1) % X; return x == 0; } return false; } template <typename T> inline T parallel_it_init(T start) { return start; } template <typename T, typename Q, typename R, typename... Args> inline T parallel_it_init(T start, Q& x, const R& X, Args&&... 
tuple) { start = parallel_it_init(start, static_cast<Args>(tuple)...); x = start % X; return start / X; } template <typename T0, typename T1, typename F> void for_2d(const int& ithr, const int& nthr, const T0& D0, const T1& D1, const F& func) { const size_t work_amount = (size_t)D0 * D1; if (work_amount == 0) return; size_t start{0}, end{0}; splitter(work_amount, nthr, ithr, start, end); T0 d0{0}; T1 d1{0}; parallel_it_init(start, d0, D0, d1, D1); for (size_t iwork = start; iwork < end; ++iwork) { func(d0, d1); parallel_it_step(d0, D0, d1, D1); } } template <typename T0, typename T1, typename F> void parallel_for2d(const T0& D0, const T1& D1, const F& func) { #pragma omp parallel for_2d(omp_get_thread_num(), omp_get_num_threads(), D0, D1, func); } const int block_size = 16; typedef __m512 vec_type_f; typedef __m512i vec_type_i; typedef __mmask16 vmask_type; using SizeVector = std::vector<int>; inline int count(SizeVector dims, int start_ind, int end_ind) { size_t count = 1; for (size_t i = start_ind; i < end_ind; i++) count *= dims[i]; return static_cast<int>(count); } inline int count(SizeVector dims, size_t start_ind = 0) { return count(dims, start_ind, dims.size()); } static inline void _mm_uni_storeu_ps(float* pdst, const __m512& vec) { _mm512_storeu_ps(pdst, vec); } static inline void _mm_uni_storeu_si(void* pdst, const __m512i vec) { _mm512_storeu_si512(pdst, vec); } static inline __mmask16 _mm_uni_cmpgt_i32(__m512i vec0, __m512i vec1) { return _mm512_cmp_epi32_mask(vec1, vec0, 1); } static inline __mmask16 _mm_uni_cmpgt_ps(__m512 vec0, __m512 vec1) { return _mm512_cmp_ps_mask(vec0, vec1, 14); } static inline __m512 _mm_uni_any_ps() { return __m512{}; } static inline __m512i _mm_uni_any_epi32() { return __m512i{}; } static inline __m512i _mm_uni_set1_epi32(int value) { return _mm512_mask_set1_epi32(_mm_uni_any_epi32(), (__mmask16)-1, value); } static inline __m512i _mm_uni_setzero_si() { return _mm512_setzero_si512(); } static inline __m512 
_mm_uni_blendv_ps(__m512 vec0, __m512 vec1, __m512 vmask) { return _mm512_mask_blend_ps( _mm512_cmpneq_epi32_mask(_mm512_castps_si512(vmask), _mm_uni_set1_epi32(0)), vec0, vec1); } static inline __m512 _mm_uni_blendv_ps(__m512 vec0, __m512 vec1, __mmask16 vmask) { return _mm512_mask_blend_ps(vmask, vec0, vec1); } struct cmpgt_ps { static inline vmask_type cmp_ps(const __m512 _Left, const __m512 _Right) { return _mm_uni_cmpgt_ps(_Left, _Right); } }; struct cmplt_ps { static inline vmask_type cmp_ps(const __m512 _Left, const __m512 _Right) { return _mm_uni_cmpgt_ps(_Right, _Left); } }; static inline __m512 _mm_uni_loadu_ps(const float* psrc) { return _mm512_mask_loadu_ps(_mm_uni_any_ps(), (__mmask16)-1, psrc); } template <class Compare1, template <typename> class Compare2> void top1_axis(const float* src_data, float* dst_data, int* dst_idx, SizeVector in_dims, int32_t axis, int before_num, int dim, int src_k, int count_vec, bool sort_value) { int after_num = count(in_dims, axis + 1, in_dims.size()); int first_index = 0; parallel_for2d(before_num, after_num / block_size, [&](int i0, int ib1) { int s_index = i0 * dim * after_num + ib1 * block_size; vec_type_f vmax_val = _mm_uni_loadu_ps(src_data + s_index); vec_type_i vindex_max_val = _mm_uni_setzero_si(); for (int i2 = 1; i2 < dim; i2++) { s_index += after_num; vec_type_f vsrc = _mm_uni_loadu_ps(src_data + s_index); vmask_type vmask = Compare1::cmp_ps(vsrc, vmax_val); vmax_val = _mm_uni_blendv_ps(vmax_val, vsrc, vmask); vec_type_i vindex_cur_val = _mm_uni_set1_epi32(i2); vindex_max_val = _mm512_mask_blend_epi32(vmask, vindex_max_val, vindex_cur_val); } if (dst_data) _mm_uni_storeu_ps(dst_data + i0 * after_num + ib1 * block_size, vmax_val); if (dst_idx) _mm_uni_storeu_si(reinterpret_cast<vec_type_i*>(dst_idx + i0 * after_num + ib1 * block_size), vindex_max_val); }); first_index = after_num / block_size * block_size; int rest = after_num - first_index; parallel_for2d(before_num, rest, [&](int i0, int i1) { int 
index_max_val = 0; int s_index = i0 * dim * after_num + first_index + i1; float max_val = src_data[s_index]; for (int i2 = 1; i2 < dim; i2++) { s_index += after_num; if (Compare2<float>()(src_data[s_index], max_val)) { max_val = src_data[s_index]; index_max_val = i2; } } if (dst_data) dst_data[i0 * after_num + first_index + i1] = max_val; if (dst_idx) dst_idx[i0 * after_num + first_index + i1] = index_max_val; }); } template <template <typename> class Compare> void top1(const float* src_data, float* dst_data, int* dst_idx, SizeVector in_dims, int32_t axis, int before_num, int dim, int src_k, int count_vec, bool sort_value) { parallel_for(before_num, [&](int i0) { int index_max_val = 0; int s_index = i0 * dim; float max_val = src_data[s_index]; for (int i1 = 1; i1 < dim; i1++) { s_index++; if (Compare<float>()(src_data[s_index], max_val)) { max_val = src_data[s_index]; index_max_val = i1; } } if (dst_data) dst_data[i0] = max_val; if (dst_idx) dst_idx[i0] = index_max_val; }); } template <class Compare1, template <typename> class Compare2> void topk_axis(const float* src_data, float* dst_data, int* dst_idx, SizeVector in_dims, int32_t axis, int before_num, int dim, int src_k, int count_vec, bool sort_value) { int after_num = count(in_dims, axis + 1, in_dims.size()); int first_index = 0; if (src_k < count_vec) { parallel_for2d(before_num, after_num / block_size, [&](int i0, int ib1) { const int N = 32; vec_type_f vmax_values[N]; vec_type_i vmax_indexes[N]; vec_type_f vtmp; vec_type_i vtmp_indexes; vmask_type vmask; int s_index = i0 * dim * after_num + ib1 * block_size; auto vswap_func = [&](int index1, int index2) { vtmp = vmax_values[index1]; vmax_values[index1] = _mm_uni_blendv_ps(vmax_values[index1], vmax_values[index2], vmask); vmax_values[index2] = _mm_uni_blendv_ps(vmax_values[index2], vtmp, vmask); vtmp_indexes = vmax_indexes[index1]; vmax_indexes[index1] = _mm512_mask_blend_epi32( vmask, vmax_indexes[index1], vmax_indexes[index2]); vmax_indexes[index2] = 
_mm512_mask_blend_epi32(vmask, vmax_indexes[index2], vtmp_indexes); }; for (int i2 = 0; i2 < src_k; i2++) { vmax_values[i2] = _mm_uni_loadu_ps(src_data + s_index); vmax_indexes[i2] = _mm_uni_set1_epi32(i2); s_index += after_num; } for (int i2 = 0; i2 < src_k - 1; i2++) { for (int i3 = src_k - 1; i3 > i2; i3--) { vmask = Compare1::cmp_ps(vmax_values[i3], vmax_values[i3 - 1]); if (vmask) vswap_func(i3, i3 - 1); } } for (int i2 = src_k; i2 < dim; i2++) { vmax_values[src_k] = _mm_uni_loadu_ps(src_data + s_index); vmax_indexes[src_k] = _mm_uni_set1_epi32(i2); for (int i3 = src_k; i3 > 0; i3--) { vmask = Compare1::cmp_ps(vmax_values[i3], vmax_values[i3 - 1]); if (vmask) vswap_func(i3, i3 - 1); else break; } s_index += after_num; } if (!sort_value) { for (int i2 = 0; i2 < src_k - 1; i2++) { for (int i3 = src_k - 1; i3 > i2; i3--) { vmask = _mm_uni_cmpgt_i32(vmax_indexes[i3 - 1], vmax_indexes[i3]); if (vmask) vswap_func(i3, i3 - 1); else break; } } } if (dst_data) { for (int i2 = 0; i2 < src_k; i2++) _mm_uni_storeu_ps( dst_data + (i0 * src_k + i2) * after_num + ib1 * block_size, vmax_values[i2]); } if (dst_idx) { for (int i2 = 0; i2 < src_k; i2++) _mm_uni_storeu_si( reinterpret_cast<vec_type_i*>( dst_idx + (i0 * src_k + i2) * after_num + ib1 * block_size), vmax_indexes[i2]); } }); first_index = after_num / block_size * block_size; } int rest = after_num - first_index; parallel_for2d(before_num, rest, [&](int i0, int i1) { std::vector<float> max_values(src_k + 1); std::vector<int> max_indexes(src_k + 1); float tmp_value; int tmp_index; int s_index = i0 * dim * after_num + first_index + i1; auto swap_func = [&](int index1, int index2) { tmp_value = max_values[index1]; max_values[index1] = max_values[index2]; max_values[index2] = tmp_value; tmp_index = max_indexes[index1]; max_indexes[index1] = max_indexes[index2]; max_indexes[index2] = tmp_index; }; for (int i2 = 0; i2 < src_k; i2++) { max_values[i2] = src_data[s_index]; max_indexes[i2] = i2; s_index += after_num; } for (int 
i2 = 0; i2 < src_k - 1; i2++) { for (int i3 = src_k - 1; i3 > i2; i3--) { if (Compare2<float>()(max_values[i3], max_values[i3 - 1])) { swap_func(i3, i3 - 1); } } } for (int i2 = src_k; i2 < dim; i2++) { max_values[src_k] = src_data[s_index]; max_indexes[src_k] = i2; for (int i3 = src_k; i3 > 0; i3--) { if (Compare2<float>()(max_values[i3], max_values[i3 - 1])) swap_func(i3, i3 - 1); else break; } s_index += after_num; } if (!sort_value) { for (int i2 = 0; i2 < src_k - 1; i2++) { for (int i3 = src_k - 1; i3 > i2; i3--) { if (std::greater<int>()(max_indexes[i3 - 1], max_indexes[i3])) { swap_func(i3, i3 - 1); } } } } if (dst_data) { for (int i2 = 0; i2 < src_k; i2++) dst_data[i0 * src_k * after_num + i2 * after_num + first_index + i1] = max_values[i2]; } if (dst_idx) { for (int i2 = 0; i2 < src_k; i2++) dst_idx[i0 * src_k * after_num + i2 * after_num + first_index + i1] = max_indexes[i2]; } }); } template <template <typename> class Compare> void topk(const float* src_data, float* dst_data, int* dst_idx, SizeVector in_dims, int32_t axis, int before_num, int dim, int src_k, int count_vec, bool sort_value) { parallel_for(before_num, [&](int i0) { std::vector<float> max_values(src_k + 1); std::vector<int> max_indexes(src_k + 1); float tmp_value; int tmp_index; int s_index = i0 * dim; auto swap_func = [&](int index1, int index2) { tmp_value = max_values[index1]; max_values[index1] = max_values[index2]; max_values[index2] = tmp_value; tmp_index = max_indexes[index1]; max_indexes[index1] = max_indexes[index2]; max_indexes[index2] = tmp_index; }; for (int i2 = 0; i2 < src_k; i2++) { max_values[i2] = src_data[s_index]; max_indexes[i2] = i2; s_index++; } for (int i2 = 0; i2 < src_k - 1; i2++) { for (int i3 = src_k - 1; i3 > i2; i3--) { if (Compare<float>()(max_values[i3], max_values[i3 - 1])) { swap_func(i3, i3 - 1); } } } for (int i2 = src_k; i2 < dim; i2++) { max_values[src_k] = src_data[s_index]; max_indexes[src_k] = i2; for (int i3 = src_k; i3 > 0; i3--) { if 
(Compare<float>()(max_values[i3], max_values[i3 - 1])) swap_func(i3, i3 - 1); else break; } s_index++; } if (!sort_value) { for (int i2 = 0; i2 < src_k - 1; i2++) { for (int i3 = src_k - 1; i3 > i2; i3--) { if (std::greater<int>()(max_indexes[i3 - 1], max_indexes[i3])) { swap_func(i3, i3 - 1); } } } } if (dst_data) { for (int i2 = 0; i2 < src_k; i2++) dst_data[i0 * src_k + i2] = max_values[i2]; } if (dst_idx) { for (int i2 = 0; i2 < src_k; i2++) dst_idx[i0 * src_k + i2] = max_indexes[i2]; } }); } void topk_func(float* src, float* dst_data, int* dst_idx, std::vector<int32_t> src_dims, uint32_t K, bool largest, bool sorted, uint32_t axis) { auto in_dims = src_dims; size_t axis_dim; size_t axis_stride = 1; size_t axis_step = 1; int count_vec = 32; bool is_last_dim = false; int src_k = K; bool mode_max, sort_value; int dim, before_num; int axis_ = -1; if (axis_ < 0) axis_ += src_dims.size(); axis = static_cast<size_t>(axis_); if (largest) mode_max = true; else mode_max = false; if (sorted) sort_value = true; else sort_value = false; int j; for (j = src_dims.size() - 1; j >= 0; j--) { if (src_dims[j] != 1) break; } if (static_cast<size_t>(j) == axis) is_last_dim = true; for (size_t i = 0; i < axis; i++) { axis_step *= src_dims[i]; } axis_dim = src_dims[axis]; for (size_t i = (axis + 1); i < src_dims.size(); i++) { axis_stride *= src_dims[i]; } dim = static_cast<int>(src_dims[axis]); before_num = count(src_dims, 0, axis); if (src_k == 1) { if (is_last_dim) { if (mode_max) top1<std::greater>(src, dst_data, dst_idx, in_dims, axis, before_num, dim, src_k, count_vec, sort_value); else top1<std::less>(src, dst_data, dst_idx, in_dims, axis, before_num, dim, src_k, count_vec, sort_value); } else { if (mode_max) top1_axis<cmpgt_ps, std::greater>(src, dst_data, dst_idx, in_dims, axis, before_num, dim, src_k, count_vec, sort_value); else top1_axis<cmplt_ps, std::less>(src, dst_data, dst_idx, in_dims, axis, before_num, dim, src_k, count_vec, sort_value); } } else { if (is_last_dim) 
{ if (mode_max) { topk<std::greater>(src, dst_data, dst_idx, in_dims, axis, before_num, dim, src_k, count_vec, sort_value); } else topk<std::less>(src, dst_data, dst_idx, in_dims, axis, before_num, dim, src_k, count_vec, sort_value); } else { if (mode_max) topk_axis<cmpgt_ps, std::greater>(src, dst_data, dst_idx, in_dims, axis, before_num, dim, src_k, count_vec, sort_value); else topk_axis<cmplt_ps, std::less>(src, dst_data, dst_idx, in_dims, axis, before_num, dim, src_k, count_vec, sort_value); } } } #if defined(__GNUC__) && (__GNUC__ > 9) static __m512 __mm512_fake_erf(__m512 src) { auto abssrc = _mm512_abs_ps(src); __mmask16 erf_mask = _mm512_cmp_ps_mask(abssrc, _mm512_set1_ps(1.0), _CMP_LT_OQ); // < 1 __m512 dst512 = erf_avx512(src); __m512 dstc512 = erfc_avx512(src); return _mm512_mask_blend_ps(erf_mask, dstc512, dst512); } static void erf_func(float* src, float* dst, size_t len) { int i; for (i = 0; i + 16 <= len; i += 16) { __m512 src512 = _mm512_loadu_ps(src + i); __m512 abssrc = _mm512_abs_ps(src512); __mmask16 erf_mask = _mm512_cmp_ps_mask(abssrc, _mm512_set1_ps(1.0), _CMP_LT_OQ); // < 1 __mmask16 erfc_mask = ~erf_mask; auto dst512 = __mm512_fake_erf(src512); _mm512_storeu_ps(dst + i, dst512); } int remain = len - i; if (remain) { __mmask16 mask = 0xffff; mask = mask >> (16 - remain); __m512 src512 = _mm512_maskz_loadu_ps(mask, src + i); __mmask16 erf_mask = _mm512_cmp_ps_mask(src512, _mm512_set1_ps(1.0), _CMP_LT_OQ); // < 1 __mmask16 erfc_mask = ~erf_mask; auto dst512 = __mm512_fake_erf(src512); _mm512_mask_storeu_ps(dst + i, mask, dst512); // printf("erf_p remain...\n"); } return; } #else static void erf_func(float* src, float* dst, size_t len) { assert(0); } #endif #if defined(__GNUC__) && (__GNUC__ > 9) static void erf_bf16_func(int16_t* src, float* dst, size_t len) { int i = 0; int vec_size = 512 / 16; __mmask16 mask16 = 0xFFFF; for (; i <= len - vec_size; i += vec_size) { auto a0 = _mm512_cvtbf16f32_load(mask16, src + i); auto a1 = 
_mm512_cvtbf16f32_load(mask16, src + i + 16); auto erf_dst_a0 = __mm512_fake_erf(a0); auto erf_dst_a1 = __mm512_fake_erf(a1); auto C_bf16 = _mm512_cvtne2ps_pbh(erf_dst_a1, erf_dst_a0); _mm512_mask_storeu_ps(dst + i / 2, mask16, _mm512_castsi512_ps(C_bf16)); } if ((len - i) > 16) { auto a0 = _mm512_cvtbf16f32_load(mask16, src + i); auto out0 = __mm512_fake_erf(a0); auto C_bf16 = _mm512_cvtneps_pbh(out0); _mm256_storeu_ps(dst + i / 2, _mm256_castsi256_ps(C_bf16)); i += 16; } if (len - i) { auto tail_mask = calculat_offset(len - i, vec_size); auto a0 = _mm512_cvtbf16f32_load(tail_mask, src + i); auto out0 = __mm512_fake_erf(a0); auto C_bf16 = _mm512_cvtneps_pbh(out0); _mm256_mask_storeu_ps(dst + i, tail_mask, _mm256_castsi256_ps(C_bf16)); } return; } #else static void erf_bf16_func(int16_t* src, float* dst, size_t len) { assert(0); } #endif // nms function related enum class boxEncoding { CORNER, CENTER }; struct filteredBoxes { float score; int class_index; int box_index; filteredBoxes() : score(0), class_index(0), box_index(0) {} filteredBoxes(float _score, int _class_index, int _box_index) : score(_score), class_index(_class_index), box_index(_box_index) {} }; struct Box { float score; int class_index; int box_index; Box() {} Box(float _score, int _class_index, int _box_index) : score(_score), class_index(_class_index), box_index(_box_index) {} }; void nms_func(float* boxes, float* scores, size_t batch_idx, size_t class_num, size_t num_boxes, size_t max_num_outputs, float score_threshold, float iou_threshold, int32_t* output_indices) { auto intersectionOverUnion = [](const float* boxesI, const float* boxesJ, boxEncoding boxEncodingType) { float yminI, xminI, ymaxI, xmaxI, yminJ, xminJ, ymaxJ, xmaxJ; if (boxEncodingType == boxEncoding::CENTER) { // box format: x_center, y_center, width, height yminI = boxesI[1] - boxesI[3] / 2.f; xminI = boxesI[0] - boxesI[2] / 2.f; ymaxI = boxesI[1] + boxesI[3] / 2.f; xmaxI = boxesI[0] + boxesI[2] / 2.f; yminJ = boxesJ[1] - 
boxesJ[3] / 2.f; xminJ = boxesJ[0] - boxesJ[2] / 2.f; ymaxJ = boxesJ[1] + boxesJ[3] / 2.f; xmaxJ = boxesJ[0] + boxesJ[2] / 2.f; } else { // box format: y1, x1, y2, x2 yminI = (std::min)(boxesI[0], boxesI[2]); xminI = (std::min)(boxesI[1], boxesI[3]); ymaxI = (std::max)(boxesI[0], boxesI[2]); xmaxI = (std::max)(boxesI[1], boxesI[3]); yminJ = (std::min)(boxesJ[0], boxesJ[2]); xminJ = (std::min)(boxesJ[1], boxesJ[3]); ymaxJ = (std::max)(boxesJ[0], boxesJ[2]); xmaxJ = (std::max)(boxesJ[1], boxesJ[3]); } float areaI = (ymaxI - yminI) * (xmaxI - xminI); float areaJ = (ymaxJ - yminJ) * (xmaxJ - xminJ); if (areaI <= 0.f || areaJ <= 0.f) return 0.f; float intersection_area = (std::max)((std::min)(ymaxI, ymaxJ) - (std::max)(yminI, yminJ), 0.f) * (std::max)((std::min)(xmaxI, xmaxJ) - (std::max)(xminI, xminJ), 0.f); return intersection_area / (areaI + areaJ - intersection_area); }; size_t numFiltBox; bool sort_result_descending = true; boxEncoding boxEncodingType = boxEncoding::CORNER; if (max_num_outputs == 0) { return; } std::vector<filteredBoxes> filtBoxes(num_boxes); std::vector<Box> sorted_boxes; for (int box_idx = 0; box_idx < num_boxes; box_idx++) { float* scores_ptr = scores + box_idx * class_num; int idx = std::max_element(scores_ptr, scores_ptr + class_num) - scores_ptr; float score = scores_ptr[idx]; if (score > score_threshold) { sorted_boxes.emplace_back(Box(score, idx, box_idx)); } } int io_selection_size = 0; if (sorted_boxes.size() > 0) { auto _compare = [](const Box l, const Box r) { return (l.score > r.score || ((l.score == r.score) && (l.box_index < r.box_index))); }; std::sort(sorted_boxes.begin(), sorted_boxes.end(), _compare); for (int i = 0; i < sorted_boxes.size(); i++) { auto score = sorted_boxes[i].score; auto idx = sorted_boxes[i].class_index; auto box_idx = sorted_boxes[i].box_index; } filtBoxes[0] = filteredBoxes(sorted_boxes[0].score, sorted_boxes[0].class_index, sorted_boxes[0].box_index); io_selection_size++; for (size_t box_idx = 1; (box_idx < 
sorted_boxes.size()) && (io_selection_size < max_num_outputs); box_idx++) { bool box_is_selected = true; for (int idx = io_selection_size - 1; idx >= 0; idx--) { float iou = intersectionOverUnion( &boxes[sorted_boxes[box_idx].box_index * 4], &boxes[filtBoxes[idx].box_index * 4], boxEncodingType); if (iou >= iou_threshold) { box_is_selected = false; break; } } if (box_is_selected) { filtBoxes[io_selection_size] = filteredBoxes( sorted_boxes[box_idx].score, sorted_boxes[box_idx].class_index, sorted_boxes[box_idx].box_index); io_selection_size++; } } } numFiltBox = io_selection_size; memset(output_indices, max_num_outputs * 3, 0); memset(output_indices, max_num_outputs, batch_idx); for (size_t idx = 0; idx < numFiltBox; idx++) { output_indices[max_num_outputs + 2 * idx] = filtBoxes[idx].class_index; output_indices[max_num_outputs + 2 * idx + 1] = filtBoxes[idx].box_index; } } } // namespace dnnl_utils
StaticRTree.h
/* Copyright (c) 2013, Project OSRM, Dennis Luxen, others All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef STATICRTREE_H #define STATICRTREE_H #include "DeallocatingVector.h" #include "HilbertValue.h" #include "PhantomNodes.h" #include "QueryNode.h" #include "SharedMemoryFactory.h" #include "SharedMemoryVectorWrapper.h" #include "../Util/MercatorUtil.h" #include "../Util/OSRMException.h" #include "../Util/SimpleLogger.h" #include "../typedefs.h" #include <osrm/Coordinate.h> #include <boost/assert.hpp> #include <boost/filesystem.hpp> #include <boost/filesystem/fstream.hpp> #include <boost/thread.hpp> #include <algorithm> #include <array> #include <chrono> #include <limits> #include <memory> #include <queue> #include <string> #include <vector> // tuning parameters const static uint32_t RTREE_BRANCHING_FACTOR = 64; const static uint32_t RTREE_LEAF_NODE_SIZE = 1024; static boost::thread_specific_ptr<boost::filesystem::ifstream> thread_local_rtree_stream; // Implements a static, i.e. packed, R-tree template <class DataT, class CoordinateListT = std::vector<FixedPointCoordinate>, bool UseSharedMemory = false> class StaticRTree { public: struct RectangleInt2D { RectangleInt2D() : min_lon(INT_MAX), max_lon(INT_MIN), min_lat(INT_MAX), max_lat(INT_MIN) {} int32_t min_lon, max_lon; int32_t min_lat, max_lat; inline void InitializeMBRectangle(const std::array<DataT, RTREE_LEAF_NODE_SIZE> &objects, const uint32_t element_count, const std::vector<NodeInfo> &coordinate_list) { for (uint32_t i = 0; i < element_count; ++i) { min_lon = std::min(min_lon, std::min(coordinate_list.at(objects[i].u).lon, coordinate_list.at(objects[i].v).lon)); max_lon = std::max(max_lon, std::max(coordinate_list.at(objects[i].u).lon, coordinate_list.at(objects[i].v).lon)); min_lat = std::min(min_lat, std::min(coordinate_list.at(objects[i].u).lat, coordinate_list.at(objects[i].v).lat)); max_lat = std::max(max_lat, std::max(coordinate_list.at(objects[i].u).lat, coordinate_list.at(objects[i].v).lat)); } } inline void AugmentMBRectangle(const RectangleInt2D &other) { min_lon = std::min(min_lon, 
other.min_lon); max_lon = std::max(max_lon, other.max_lon); min_lat = std::min(min_lat, other.min_lat); max_lat = std::max(max_lat, other.max_lat); } inline FixedPointCoordinate Centroid() const { FixedPointCoordinate centroid; // The coordinates of the midpoints are given by: // x = (x1 + x2) /2 and y = (y1 + y2) /2. centroid.lon = (min_lon + max_lon) / 2; centroid.lat = (min_lat + max_lat) / 2; return centroid; } inline bool Intersects(const RectangleInt2D &other) const { FixedPointCoordinate upper_left(other.max_lat, other.min_lon); FixedPointCoordinate upper_right(other.max_lat, other.max_lon); FixedPointCoordinate lower_right(other.min_lat, other.max_lon); FixedPointCoordinate lower_left(other.min_lat, other.min_lon); return (Contains(upper_left) || Contains(upper_right) || Contains(lower_right) || Contains(lower_left)); } inline float GetMinDist(const FixedPointCoordinate &location) const { bool is_contained = Contains(location); if (is_contained) { return 0.; } float min_dist = std::numeric_limits<float>::max(); min_dist = std::min(min_dist, FixedPointCoordinate::ApproximateEuclideanDistance( location.lat, location.lon, max_lat, min_lon)); min_dist = std::min(min_dist, FixedPointCoordinate::ApproximateEuclideanDistance( location.lat, location.lon, max_lat, max_lon)); min_dist = std::min(min_dist, FixedPointCoordinate::ApproximateEuclideanDistance( location.lat, location.lon, min_lat, max_lon)); min_dist = std::min(min_dist, FixedPointCoordinate::ApproximateEuclideanDistance( location.lat, location.lon, min_lat, min_lon)); return min_dist; } inline float GetMinMaxDist(const FixedPointCoordinate &location) const { float min_max_dist = std::numeric_limits<float>::max(); // Get minmax distance to each of the four sides FixedPointCoordinate upper_left(max_lat, min_lon); FixedPointCoordinate upper_right(max_lat, max_lon); FixedPointCoordinate lower_right(min_lat, max_lon); FixedPointCoordinate lower_left(min_lat, min_lon); min_max_dist = std::min( min_max_dist, 
std::max( FixedPointCoordinate::ApproximateEuclideanDistance(location, upper_left), FixedPointCoordinate::ApproximateEuclideanDistance(location, upper_right))); min_max_dist = std::min( min_max_dist, std::max( FixedPointCoordinate::ApproximateEuclideanDistance(location, upper_right), FixedPointCoordinate::ApproximateEuclideanDistance(location, lower_right))); min_max_dist = std::min( min_max_dist, std::max(FixedPointCoordinate::ApproximateEuclideanDistance(location, lower_right), FixedPointCoordinate::ApproximateEuclideanDistance(location, lower_left))); min_max_dist = std::min( min_max_dist, std::max(FixedPointCoordinate::ApproximateEuclideanDistance(location, lower_left), FixedPointCoordinate::ApproximateEuclideanDistance(location, upper_left))); return min_max_dist; } inline bool Contains(const FixedPointCoordinate &location) const { const bool lats_contained = (location.lat > min_lat) && (location.lat < max_lat); const bool lons_contained = (location.lon > min_lon) && (location.lon < max_lon); return lats_contained && lons_contained; } inline friend std::ostream &operator<<(std::ostream &out, const RectangleInt2D &rect) { out << rect.min_lat / COORDINATE_PRECISION << "," << rect.min_lon / COORDINATE_PRECISION << " " << rect.max_lat / COORDINATE_PRECISION << "," << rect.max_lon / COORDINATE_PRECISION; return out; } }; typedef RectangleInt2D RectangleT; struct TreeNode { TreeNode() : child_count(0), child_is_on_disk(false) {} RectangleT minimum_bounding_rectangle; uint32_t child_count : 31; bool child_is_on_disk : 1; uint32_t children[RTREE_BRANCHING_FACTOR]; }; private: struct WrappedInputElement { explicit WrappedInputElement(const uint32_t _array_index, const uint64_t _hilbert_value) : m_array_index(_array_index), m_hilbert_value(_hilbert_value) { } WrappedInputElement() : m_array_index(UINT_MAX), m_hilbert_value(0) {} uint32_t m_array_index; uint64_t m_hilbert_value; inline bool operator<(const WrappedInputElement &other) const { return m_hilbert_value < 
other.m_hilbert_value; } }; struct LeafNode { LeafNode() : object_count(0) {} uint32_t object_count; std::array<DataT, RTREE_LEAF_NODE_SIZE> objects; }; struct QueryCandidate { explicit QueryCandidate(const uint32_t n_id, const float dist) : node_id(n_id), min_dist(dist) { } QueryCandidate() : node_id(UINT_MAX), min_dist(std::numeric_limits<float>::max()) {} uint32_t node_id; float min_dist; inline bool operator<(const QueryCandidate &other) const { return min_dist < other.min_dist; } }; typename ShM<TreeNode, UseSharedMemory>::vector m_search_tree; uint64_t m_element_count; const std::string m_leaf_node_filename; std::shared_ptr<CoordinateListT> m_coordinate_list; public: StaticRTree() = delete; StaticRTree(const StaticRTree &) = delete; // Construct a packed Hilbert-R-Tree with Kamel-Faloutsos algorithm [1] explicit StaticRTree(std::vector<DataT> &input_data_vector, const std::string tree_node_filename, const std::string leaf_node_filename, const std::vector<NodeInfo> &coordinate_list) : m_element_count(input_data_vector.size()), m_leaf_node_filename(leaf_node_filename) { SimpleLogger().Write() << "constructing r-tree of " << m_element_count << " edge elements build on-top of " << coordinate_list.size() << " coordinates"; std::chrono::time_point<std::chrono::steady_clock> time0 = std::chrono::steady_clock::now(); std::vector<WrappedInputElement> input_wrapper_vector(m_element_count); HilbertCode get_hilbert_number; // generate auxiliary vector of hilbert-values #pragma omp parallel for schedule(guided) for (uint64_t element_counter = 0; element_counter < m_element_count; ++element_counter) { input_wrapper_vector[element_counter].m_array_index = element_counter; // Get Hilbert-Value for centroid in mercartor projection DataT const &current_element = input_data_vector[element_counter]; FixedPointCoordinate current_centroid = DataT::Centroid(FixedPointCoordinate(coordinate_list.at(current_element.u).lat, coordinate_list.at(current_element.u).lon), 
FixedPointCoordinate(coordinate_list.at(current_element.v).lat, coordinate_list.at(current_element.v).lon)); current_centroid.lat = COORDINATE_PRECISION * lat2y(current_centroid.lat / COORDINATE_PRECISION); uint64_t current_hilbert_value = get_hilbert_number(current_centroid); input_wrapper_vector[element_counter].m_hilbert_value = current_hilbert_value; } // open leaf file boost::filesystem::ofstream leaf_node_file(leaf_node_filename, std::ios::binary); leaf_node_file.write((char *)&m_element_count, sizeof(uint64_t)); // sort the hilbert-value representatives std::sort(input_wrapper_vector.begin(), input_wrapper_vector.end()); std::vector<TreeNode> tree_nodes_in_level; // pack M elements into leaf node and write to leaf file uint64_t processed_objects_count = 0; while (processed_objects_count < m_element_count) { LeafNode current_leaf; TreeNode current_node; // SimpleLogger().Write() << "reading " << tree_size << " tree nodes in " << // (sizeof(TreeNode)*tree_size) << " bytes"; for (uint32_t current_element_index = 0; RTREE_LEAF_NODE_SIZE > current_element_index; ++current_element_index) { if (m_element_count > (processed_objects_count + current_element_index)) { uint32_t index_of_next_object = input_wrapper_vector[processed_objects_count + current_element_index] .m_array_index; current_leaf.objects[current_element_index] = input_data_vector[index_of_next_object]; ++current_leaf.object_count; } } // generate tree node that resemble the objects in leaf and store it for next level current_node.minimum_bounding_rectangle.InitializeMBRectangle( current_leaf.objects, current_leaf.object_count, coordinate_list); current_node.child_is_on_disk = true; current_node.children[0] = tree_nodes_in_level.size(); tree_nodes_in_level.emplace_back(current_node); // write leaf_node to leaf node file leaf_node_file.write((char *)&current_leaf, sizeof(current_leaf)); processed_objects_count += current_leaf.object_count; } // close leaf file leaf_node_file.close(); uint32_t 
processing_level = 0; while (1 < tree_nodes_in_level.size()) { std::vector<TreeNode> tree_nodes_in_next_level; uint32_t processed_tree_nodes_in_level = 0; while (processed_tree_nodes_in_level < tree_nodes_in_level.size()) { TreeNode parent_node; // pack RTREE_BRANCHING_FACTOR elements into tree_nodes each for (uint32_t current_child_node_index = 0; RTREE_BRANCHING_FACTOR > current_child_node_index; ++current_child_node_index) { if (processed_tree_nodes_in_level < tree_nodes_in_level.size()) { TreeNode &current_child_node = tree_nodes_in_level[processed_tree_nodes_in_level]; // add tree node to parent entry parent_node.children[current_child_node_index] = m_search_tree.size(); m_search_tree.emplace_back(current_child_node); // augment MBR of parent parent_node.minimum_bounding_rectangle.AugmentMBRectangle( current_child_node.minimum_bounding_rectangle); // increase counters ++parent_node.child_count; ++processed_tree_nodes_in_level; } } tree_nodes_in_next_level.emplace_back(parent_node); } tree_nodes_in_level.swap(tree_nodes_in_next_level); ++processing_level; } BOOST_ASSERT_MSG(1 == tree_nodes_in_level.size(), "tree broken, more than one root node"); // last remaining entry is the root node, store it m_search_tree.emplace_back(tree_nodes_in_level[0]); // reverse and renumber tree to have root at index 0 std::reverse(m_search_tree.begin(), m_search_tree.end()); #pragma omp parallel for schedule(guided) for (uint32_t i = 0; i < m_search_tree.size(); ++i) { TreeNode &current_tree_node = m_search_tree[i]; for (uint32_t j = 0; j < current_tree_node.child_count; ++j) { const uint32_t old_id = current_tree_node.children[j]; const uint32_t new_id = m_search_tree.size() - old_id - 1; current_tree_node.children[j] = new_id; } } // open tree file boost::filesystem::ofstream tree_node_file(tree_node_filename, std::ios::binary); uint32_t size_of_tree = m_search_tree.size(); BOOST_ASSERT_MSG(0 < size_of_tree, "tree empty"); tree_node_file.write((char *)&size_of_tree, 
sizeof(uint32_t)); tree_node_file.write((char *)&m_search_tree[0], sizeof(TreeNode) * size_of_tree); // close tree node file. tree_node_file.close(); std::chrono::time_point<std::chrono::steady_clock> time1 = std::chrono::steady_clock::now(); std::chrono::duration<double> elapsed_seconds = time1 - time0; SimpleLogger().Write() << "finished r-tree construction in " << (elapsed_seconds.count()) << " seconds"; } // Read-only operation for queries explicit StaticRTree(const boost::filesystem::path &node_file, const boost::filesystem::path &leaf_file, const std::shared_ptr<CoordinateListT> coordinate_list) : m_leaf_node_filename(leaf_file.string()) { // open tree node file and load into RAM. m_coordinate_list = coordinate_list; if (!boost::filesystem::exists(node_file)) { throw OSRMException("ram index file does not exist"); } if (0 == boost::filesystem::file_size(node_file)) { throw OSRMException("ram index file is empty"); } boost::filesystem::ifstream tree_node_file(node_file, std::ios::binary); uint32_t tree_size = 0; tree_node_file.read((char *)&tree_size, sizeof(uint32_t)); m_search_tree.resize(tree_size); tree_node_file.read((char *)&m_search_tree[0], sizeof(TreeNode) * tree_size); tree_node_file.close(); // open leaf node file and store thread specific pointer if (!boost::filesystem::exists(leaf_file)) { throw OSRMException("mem index file does not exist"); } if (0 == boost::filesystem::file_size(leaf_file)) { throw OSRMException("mem index file is empty"); } boost::filesystem::ifstream leaf_node_file(leaf_file, std::ios::binary); leaf_node_file.read((char *)&m_element_count, sizeof(uint64_t)); leaf_node_file.close(); // SimpleLogger().Write() << tree_size << " nodes in search tree"; // SimpleLogger().Write() << m_element_count << " elements in leafs"; } explicit StaticRTree(TreeNode *tree_node_ptr, const uint32_t number_of_nodes, const boost::filesystem::path &leaf_file, std::shared_ptr<CoordinateListT> coordinate_list) : m_search_tree(tree_node_ptr, 
number_of_nodes), m_leaf_node_filename(leaf_file.string()), m_coordinate_list(coordinate_list) { // open leaf node file and store thread specific pointer if (!boost::filesystem::exists(leaf_file)) { throw OSRMException("mem index file does not exist"); } if (0 == boost::filesystem::file_size(leaf_file)) { throw OSRMException("mem index file is empty"); } boost::filesystem::ifstream leaf_node_file(leaf_file, std::ios::binary); leaf_node_file.read((char *)&m_element_count, sizeof(uint64_t)); leaf_node_file.close(); if (thread_local_rtree_stream.get()) { thread_local_rtree_stream->close(); } // SimpleLogger().Write() << tree_size << " nodes in search tree"; // SimpleLogger().Write() << m_element_count << " elements in leafs"; } // Read-only operation for queries bool LocateClosestEndPointForCoordinate(const FixedPointCoordinate &input_coordinate, FixedPointCoordinate &result_coordinate, const unsigned zoom_level) { bool ignore_tiny_components = (zoom_level <= 14); DataT nearest_edge; float min_dist = std::numeric_limits<float>::max(); float min_max_dist = std::numeric_limits<float>::max(); bool found_a_nearest_edge = false; // initialize queue with root element std::priority_queue<QueryCandidate> traversal_queue; float current_min_dist = m_search_tree[0].minimum_bounding_rectangle.GetMinDist(input_coordinate); traversal_queue.emplace(0, current_min_dist); while (!traversal_queue.empty()) { const QueryCandidate current_query_node = traversal_queue.top(); traversal_queue.pop(); const bool prune_downward = (current_query_node.min_dist >= min_max_dist); const bool prune_upward = (current_query_node.min_dist >= min_dist); if (!prune_downward && !prune_upward) { // downward pruning TreeNode &current_tree_node = m_search_tree[current_query_node.node_id]; if (current_tree_node.child_is_on_disk) { LeafNode current_leaf_node; LoadLeafFromDisk(current_tree_node.children[0], current_leaf_node); for (uint32_t i = 0; i < current_leaf_node.object_count; ++i) { DataT const 
&current_edge = current_leaf_node.objects[i]; if (ignore_tiny_components && current_edge.is_in_tiny_cc) { continue; } float current_minimum_distance = FixedPointCoordinate::ApproximateEuclideanDistance( input_coordinate.lat, input_coordinate.lon, m_coordinate_list->at(current_edge.u).lat, m_coordinate_list->at(current_edge.u).lon); if (current_minimum_distance < min_dist) { // found a new minimum min_dist = current_minimum_distance; result_coordinate.lat = m_coordinate_list->at(current_edge.u).lat; result_coordinate.lon = m_coordinate_list->at(current_edge.u).lon; found_a_nearest_edge = true; } current_minimum_distance = FixedPointCoordinate::ApproximateEuclideanDistance( input_coordinate.lat, input_coordinate.lon, m_coordinate_list->at(current_edge.v).lat, m_coordinate_list->at(current_edge.v).lon); if (current_minimum_distance < min_dist) { // found a new minimum min_dist = current_minimum_distance; result_coordinate.lat = m_coordinate_list->at(current_edge.v).lat; result_coordinate.lon = m_coordinate_list->at(current_edge.v).lon; found_a_nearest_edge = true; } } } else { // traverse children, prune if global mindist is smaller than local one for (uint32_t i = 0; i < current_tree_node.child_count; ++i) { const int32_t child_id = current_tree_node.children[i]; const TreeNode &child_tree_node = m_search_tree[child_id]; const RectangleT &child_rectangle = child_tree_node.minimum_bounding_rectangle; const float current_min_dist = child_rectangle.GetMinDist(input_coordinate); const float current_min_max_dist = child_rectangle.GetMinMaxDist(input_coordinate); if (current_min_max_dist < min_max_dist) { min_max_dist = current_min_max_dist; } if (current_min_dist > min_max_dist) { continue; } if (current_min_dist > min_dist) { // upward pruning continue; } traversal_queue.emplace(child_id, current_min_dist); } } } } return found_a_nearest_edge; } bool FindPhantomNodeForCoordinate(const FixedPointCoordinate &input_coordinate, PhantomNode &result_phantom_node, const 
unsigned zoom_level) { // SimpleLogger().Write() << "searching for coordinate " << input_coordinate; const bool ignore_tiny_components = (zoom_level <= 14); DataT nearest_edge; float min_dist = std::numeric_limits<float>::max(); float min_max_dist = std::numeric_limits<float>::max(); bool found_a_nearest_edge = false; FixedPointCoordinate nearest, current_start_coordinate, current_end_coordinate; // initialize queue with root element std::priority_queue<QueryCandidate> traversal_queue; float current_min_dist = m_search_tree[0].minimum_bounding_rectangle.GetMinDist(input_coordinate); traversal_queue.emplace(0, current_min_dist); BOOST_ASSERT_MSG(std::numeric_limits<float>::epsilon() > (0. - traversal_queue.top().min_dist), "Root element in NN Search has min dist != 0."); LeafNode current_leaf_node; while (!traversal_queue.empty()) { const QueryCandidate current_query_node = traversal_queue.top(); traversal_queue.pop(); const bool prune_downward = (current_query_node.min_dist >= min_max_dist); const bool prune_upward = (current_query_node.min_dist >= min_dist); if (!prune_downward && !prune_upward) { // downward pruning const TreeNode &current_tree_node = m_search_tree[current_query_node.node_id]; if (current_tree_node.child_is_on_disk) { LoadLeafFromDisk(current_tree_node.children[0], current_leaf_node); for (uint32_t i = 0; i < current_leaf_node.object_count; ++i) { DataT &current_edge = current_leaf_node.objects[i]; if (ignore_tiny_components && current_edge.is_in_tiny_cc) { continue; } float current_ratio = 0.; const float current_perpendicular_distance = FixedPointCoordinate::ComputePerpendicularDistance( m_coordinate_list->at(current_edge.u), m_coordinate_list->at(current_edge.v), input_coordinate, nearest, current_ratio); BOOST_ASSERT(0. 
<= current_perpendicular_distance); if ((current_perpendicular_distance < min_dist) && !EpsilonCompare(current_perpendicular_distance, min_dist)) { // found a new minimum min_dist = current_perpendicular_distance; // TODO: use assignment c'tor in PhantomNode result_phantom_node.forward_node_id = current_edge.forward_edge_based_node_id; result_phantom_node.reverse_node_id = current_edge.reverse_edge_based_node_id; result_phantom_node.name_id = current_edge.name_id; result_phantom_node.forward_weight = current_edge.forward_weight; result_phantom_node.reverse_weight = current_edge.reverse_weight; result_phantom_node.forward_offset = current_edge.forward_offset; result_phantom_node.reverse_offset = current_edge.reverse_offset; result_phantom_node.packed_geometry_id = current_edge.packed_geometry_id; result_phantom_node.fwd_segment_position = current_edge.fwd_segment_position; result_phantom_node.location = nearest; current_start_coordinate.lat = m_coordinate_list->at(current_edge.u).lat; current_start_coordinate.lon = m_coordinate_list->at(current_edge.u).lon; current_end_coordinate.lat = m_coordinate_list->at(current_edge.v).lat; current_end_coordinate.lon = m_coordinate_list->at(current_edge.v).lon; nearest_edge = current_edge; found_a_nearest_edge = true; } } } else { // traverse children, prune if global mindist is smaller than local one for (uint32_t i = 0; i < current_tree_node.child_count; ++i) { const int32_t child_id = current_tree_node.children[i]; TreeNode &child_tree_node = m_search_tree[child_id]; RectangleT &child_rectangle = child_tree_node.minimum_bounding_rectangle; const float current_min_dist = child_rectangle.GetMinDist(input_coordinate); const float current_min_max_dist = child_rectangle.GetMinMaxDist(input_coordinate); if (current_min_max_dist < min_max_dist) { min_max_dist = current_min_max_dist; } if (current_min_dist > min_max_dist) { continue; } if (current_min_dist > min_dist) { // upward pruning continue; } traversal_queue.emplace(child_id, 
current_min_dist); } } } } // Hack to fix rounding errors and wandering via nodes. if (1 == std::abs(input_coordinate.lon - result_phantom_node.location.lon)) { result_phantom_node.location.lon = input_coordinate.lon; } if (1 == std::abs(input_coordinate.lat - result_phantom_node.location.lat)) { result_phantom_node.location.lat = input_coordinate.lat; } float ratio = 0.f; if (found_a_nearest_edge) { const float distance_1 = FixedPointCoordinate::ApproximateEuclideanDistance( current_start_coordinate, result_phantom_node.location); const float distance_2 = FixedPointCoordinate::ApproximateEuclideanDistance( current_start_coordinate, current_end_coordinate); ratio = distance_1 / distance_2; ratio = std::min(1.f, ratio); if (SPECIAL_NODEID != result_phantom_node.forward_node_id) { result_phantom_node.forward_weight *= ratio; } if (SPECIAL_NODEID != result_phantom_node.reverse_node_id) { result_phantom_node.reverse_weight *= (1. - ratio); } } return found_a_nearest_edge; } private: inline void LoadLeafFromDisk(const uint32_t leaf_id, LeafNode &result_node) { if (!thread_local_rtree_stream.get() || !thread_local_rtree_stream->is_open()) { thread_local_rtree_stream.reset(new boost::filesystem::ifstream( m_leaf_node_filename, std::ios::in | std::ios::binary)); } if (!thread_local_rtree_stream->good()) { thread_local_rtree_stream->clear(std::ios::goodbit); SimpleLogger().Write(logDEBUG) << "Resetting stale filestream"; } uint64_t seek_pos = sizeof(uint64_t) + leaf_id * sizeof(LeafNode); thread_local_rtree_stream->seekg(seek_pos); thread_local_rtree_stream->read((char *)&result_node, sizeof(LeafNode)); } inline bool EdgesAreEquivalent(const FixedPointCoordinate &a, const FixedPointCoordinate &b, const FixedPointCoordinate &c, const FixedPointCoordinate &d) const { return (a == b && c == d) || (a == c && b == d) || (a == d && b == c); } template<typename FloatT> inline bool EpsilonCompare(const FloatT d1, const FloatT d2) const { return (std::abs(d1 - d2) < 
std::numeric_limits<FloatT>::epsilon()); } }; //[1] "On Packing R-Trees"; I. Kamel, C. Faloutsos; 1993; DOI: 10.1145/170088.170403 //[2] "Nearest Neighbor Queries", N. Roussopulos et al; 1995; DOI: 10.1145/223784.223794 #endif // STATICRTREE_H
mediancut.c
/* ** © 2009-2018 by Kornel Lesiński. ** © 1989, 1991 by Jef Poskanzer. ** © 1997, 2000, 2002 by Greg Roelofs; based on an idea by Stefan Schneider. ** ** See COPYRIGHT file for license. */ #include <stdlib.h> #include <stddef.h> #include "libimagequant.h" #include "pam.h" #include "mediancut.h" #define index_of_channel(ch) (offsetof(f_pixel,ch)/sizeof(float)) static f_pixel averagepixels(unsigned int clrs, const hist_item achv[]); struct box { f_pixel color; f_pixel variance; double sum, total_error, max_error; unsigned int ind; unsigned int colors; }; ALWAYS_INLINE static double variance_diff(double val, const double good_enough); inline static double variance_diff(double val, const double good_enough) { val *= val; if (val < good_enough*good_enough) return val*0.25; return val; } /** Weighted per-channel variance of the box. It's used to decide which channel to split by */ static f_pixel box_variance(const hist_item achv[], const struct box *box) { f_pixel mean = box->color; double variancea=0, variancer=0, varianceg=0, varianceb=0; for(unsigned int i = 0; i < box->colors; ++i) { const f_pixel px = achv[box->ind + i].acolor; double weight = achv[box->ind + i].adjusted_weight; variancea += variance_diff(mean.a - px.a, 2.0/256.0)*weight; variancer += variance_diff(mean.r - px.r, 1.0/256.0)*weight; varianceg += variance_diff(mean.g - px.g, 1.0/256.0)*weight; varianceb += variance_diff(mean.b - px.b, 1.0/256.0)*weight; } return (f_pixel){ .a = variancea*(4.0/16.0), .r = variancer*(7.0/16.0), .g = varianceg*(9.0/16.0), .b = varianceb*(5.0/16.0), }; } static double box_max_error(const hist_item achv[], const struct box *box) { f_pixel mean = box->color; double max_error = 0; for(unsigned int i = 0; i < box->colors; ++i) { const double diff = colordifference(mean, achv[box->ind + i].acolor); if (diff > max_error) { max_error = diff; } } return max_error; } ALWAYS_INLINE static double color_weight(f_pixel median, hist_item h); static inline void hist_item_swap(hist_item 
*l, hist_item *r) { if (l != r) { hist_item t = *l; *l = *r; *r = t; } } ALWAYS_INLINE static unsigned int qsort_pivot(const hist_item *const base, const unsigned int len); inline static unsigned int qsort_pivot(const hist_item *const base, const unsigned int len) { if (len < 32) { return len/2; } const unsigned int aidx=8, bidx=len/2, cidx=len-1; const unsigned int a=base[aidx].tmp.sort_value, b=base[bidx].tmp.sort_value, c=base[cidx].tmp.sort_value; return (a < b) ? ((b < c) ? bidx : ((a < c) ? cidx : aidx )) : ((b > c) ? bidx : ((a < c) ? aidx : cidx )); } ALWAYS_INLINE static unsigned int qsort_partition(hist_item *const base, const unsigned int len); inline static unsigned int qsort_partition(hist_item *const base, const unsigned int len) { unsigned int l = 1, r = len; if (len >= 8) { hist_item_swap(&base[0], &base[qsort_pivot(base,len)]); } const unsigned int pivot_value = base[0].tmp.sort_value; while (l < r) { if (base[l].tmp.sort_value >= pivot_value) { l++; } else { while(l < --r && base[r].tmp.sort_value <= pivot_value) {} hist_item_swap(&base[l], &base[r]); } } l--; hist_item_swap(&base[0], &base[l]); return l; } /** quick select algorithm */ static void hist_item_sort_range(hist_item base[], unsigned int len, unsigned int sort_start) { for(;;) { const unsigned int l = qsort_partition(base, len), r = l+1; if (l > 0 && sort_start < l) { len = l; } else if (r < len && sort_start > r) { base += r; len -= r; sort_start -= r; } else break; } } /** sorts array to make sum of weights lower than halfvar one side, returns edge between <halfvar and >halfvar parts of the set */ static hist_item *hist_item_sort_halfvar(hist_item base[], unsigned int len, double *const lowervar, const double halfvar) { do { const unsigned int l = qsort_partition(base, len), r = l+1; // check if sum of left side is smaller than half, // if it is, then it doesn't need to be sorted unsigned int t = 0; double tmpsum = *lowervar; while (t <= l && tmpsum < halfvar) tmpsum += 
base[t++].color_weight; if (tmpsum < halfvar) { *lowervar = tmpsum; } else { if (l > 0) { hist_item *res = hist_item_sort_halfvar(base, l, lowervar, halfvar); if (res) return res; } else { // End of left recursion. This will be executed in order from the first element. *lowervar += base[0].color_weight; if (*lowervar > halfvar) return &base[0]; } } if (len > r) { base += r; len -= r; // tail-recursive "call" } else { *lowervar += base[r].color_weight; return (*lowervar > halfvar) ? &base[r] : NULL; } } while(1); } static f_pixel get_median(const struct box *b, hist_item achv[]); typedef struct { unsigned int chan; float variance; } channelvariance; static int comparevariance(const void *ch1, const void *ch2) { return ((const channelvariance*)ch1)->variance > ((const channelvariance*)ch2)->variance ? -1 : (((const channelvariance*)ch1)->variance < ((const channelvariance*)ch2)->variance ? 1 : 0); } /** Finds which channels need to be sorted first and preproceses achv for fast sort */ static double prepare_sort(struct box *b, hist_item achv[]) { /* ** Sort dimensions by their variance, and then sort colors first by dimension with highest variance */ channelvariance channels[4] = { {index_of_channel(a), b->variance.a}, {index_of_channel(r), b->variance.r}, {index_of_channel(g), b->variance.g}, {index_of_channel(b), b->variance.b}, }; qsort(channels, 4, sizeof(channels[0]), comparevariance); const unsigned int ind1 = b->ind; const unsigned int colors = b->colors; #pragma omp parallel for if (colors > 25000) \ schedule(static) default(none) shared(achv, channels) for(unsigned int i=0; i < colors; i++) { const float *chans = (const float *)&achv[ind1 + i].acolor; // Only the first channel really matters. When trying median cut many times // with different histogram weights, I don't want sort randomness to influence outcome. 
achv[ind1 + i].tmp.sort_value = ((unsigned int)(chans[channels[0].chan]*65535.0)<<16) | (unsigned int)((chans[channels[2].chan] + chans[channels[1].chan]/2.0 + chans[channels[3].chan]/4.0)*65535.0); } const f_pixel median = get_median(b, achv); // box will be split to make color_weight of each side even const unsigned int ind = b->ind, end = ind+b->colors; double totalvar = 0; #pragma omp parallel for if (end - ind > 15000) \ schedule(static) default(shared) reduction(+:totalvar) for(unsigned int j=ind; j < end; j++) totalvar += (achv[j].color_weight = color_weight(median, achv[j])); return totalvar / 2.0; } /** finds median in unsorted set by sorting only minimum required */ static f_pixel get_median(const struct box *b, hist_item achv[]) { const unsigned int median_start = (b->colors-1)/2; hist_item_sort_range(&(achv[b->ind]), b->colors, median_start); if (b->colors&1) return achv[b->ind + median_start].acolor; // technically the second color is not guaranteed to be sorted correctly // but most of the time it is good enough to be useful return averagepixels(2, &achv[b->ind + median_start]); } /* ** Find the best splittable box. -1 if no boxes are splittable. 
*/ static int best_splittable_box(struct box bv[], unsigned int boxes, const double max_mse) { int bi=-1; double maxsum=0; for(unsigned int i=0; i < boxes; i++) { if (bv[i].colors < 2) { continue; } // looks only at max variance, because it's only going to split by it const double cv = MAX(bv[i].variance.r, MAX(bv[i].variance.g,bv[i].variance.b)); double thissum = bv[i].sum * MAX(bv[i].variance.a, cv); if (bv[i].max_error > max_mse) { thissum = thissum* bv[i].max_error/max_mse; } if (thissum > maxsum) { maxsum = thissum; bi = i; } } return bi; } inline static double color_weight(f_pixel median, hist_item h) { float diff = colordifference(median, h.acolor); return sqrt(diff) * (sqrt(1.0+h.adjusted_weight)-1.0); } static void set_colormap_from_boxes(colormap *map, struct box bv[], unsigned int boxes, hist_item *achv); static void adjust_histogram(hist_item *achv, const struct box bv[], unsigned int boxes); static double box_error(const struct box *box, const hist_item achv[]) { f_pixel avg = box->color; double total_error=0; for (unsigned int i = 0; i < box->colors; ++i) { total_error += colordifference(avg, achv[box->ind + i].acolor) * achv[box->ind + i].perceptual_weight; } return total_error; } static bool total_box_error_below_target(double target_mse, struct box bv[], unsigned int boxes, const histogram *hist) { target_mse *= hist->total_perceptual_weight; double total_error=0; for(unsigned int i=0; i < boxes; i++) { // error is (re)calculated lazily if (bv[i].total_error >= 0) { total_error += bv[i].total_error; } if (total_error > target_mse) return false; } for(unsigned int i=0; i < boxes; i++) { if (bv[i].total_error < 0) { bv[i].total_error = box_error(&bv[i], hist->achv); total_error += bv[i].total_error; } if (total_error > target_mse) return false; } return true; } static void box_init(struct box *box, const hist_item *achv, const unsigned int ind, const unsigned int colors, const double sum) { box->ind = ind; box->colors = colors; box->sum = sum; 
box->total_error = -1; box->color = averagepixels(colors, &achv[ind]); #pragma omp task if (colors > 5000) box->variance = box_variance(achv, box); #pragma omp task if (colors > 8000) box->max_error = box_max_error(achv, box); } /* ** Here is the fun part, the median-cut colormap generator. This is based ** on Paul Heckbert's paper, "Color Image Quantization for Frame Buffer ** Display," SIGGRAPH 1982 Proceedings, page 297. */ LIQ_PRIVATE colormap *mediancut(histogram *hist, unsigned int newcolors, const double target_mse, const double max_mse, void* (*malloc)(size_t), void (*free)(void*)) { hist_item *achv = hist->achv; LIQ_ARRAY(struct box, bv, newcolors); unsigned int boxes = 1; /* ** Set up the initial box. */ #pragma omp parallel #pragma omp single { double sum = 0; for(unsigned int i=0; i < hist->size; i++) { sum += achv[i].adjusted_weight; } #pragma omp taskgroup { box_init(&bv[0], achv, 0, hist->size, sum); } /* ** Main loop: split boxes until we have enough. */ while (boxes < newcolors) { // first splits boxes that exceed quality limit (to have colors for things like odd green pixel), // later raises the limit to allow large smooth areas/gradients get colors. const double current_max_mse = max_mse + (boxes/(double)newcolors)*16.0*max_mse; const int bi = best_splittable_box(bv, boxes, current_max_mse); if (bi < 0) { break; /* ran out of colors! */ } unsigned int indx = bv[bi].ind; unsigned int clrs = bv[bi].colors; /* Classic implementation tries to get even number of colors or pixels in each subdivision. Here, instead of popularity I use (sqrt(popularity)*variance) metric. Each subdivision balances number of pixels (popular colors) and low variance - boxes can be large if they have similar colors. Later boxes with high variance will be more likely to be split. Median used as expected value gives much better results than mean. 
*/ const double halfvar = prepare_sort(&bv[bi], achv); double lowervar=0; // hist_item_sort_halfvar sorts and sums lowervar at the same time // returns item to break at …minus one, which does smell like an off-by-one error. hist_item *break_p = hist_item_sort_halfvar(&achv[indx], clrs, &lowervar, halfvar); unsigned int break_at = MIN(clrs-1, break_p - &achv[indx] + 1); /* ** Split the box. */ double sm = bv[bi].sum; double lowersum = 0; for(unsigned int i=0; i < break_at; i++) lowersum += achv[indx + i].adjusted_weight; #pragma omp taskgroup { box_init(&bv[bi], achv, indx, break_at, lowersum); box_init(&bv[boxes], achv, indx + break_at, clrs - break_at, sm - lowersum); } ++boxes; if (total_box_error_below_target(target_mse, bv, boxes, hist)) { break; } } } colormap *map = pam_colormap(boxes, malloc, free); set_colormap_from_boxes(map, bv, boxes, achv); adjust_histogram(achv, bv, boxes); return map; } static void set_colormap_from_boxes(colormap *map, struct box* bv, unsigned int boxes, hist_item *achv) { /* ** Ok, we've got enough boxes. Now choose a representative color for ** each box. There are a number of possible ways to make this choice. ** One would be to choose the center of the box; this ignores any structure ** within the boxes. Another method would be to average all the colors in ** the box - this is the method specified in Heckbert's paper. 
*/ for(unsigned int bi = 0; bi < boxes; ++bi) { map->palette[bi].acolor = bv[bi].color; /* store total color popularity (perceptual_weight is approximation of it) */ map->palette[bi].popularity = 0; for(unsigned int i=bv[bi].ind; i < bv[bi].ind+bv[bi].colors; i++) { map->palette[bi].popularity += achv[i].perceptual_weight; } } } /* increase histogram popularity by difference from the final color (this is used as part of feedback loop) */ static void adjust_histogram(hist_item *achv, const struct box* bv, unsigned int boxes) { for(unsigned int bi = 0; bi < boxes; ++bi) { for(unsigned int i=bv[bi].ind; i < bv[bi].ind+bv[bi].colors; i++) { achv[i].tmp.likely_colormap_index = bi; } } } static f_pixel averagepixels(unsigned int clrs, const hist_item achv[]) { double r = 0, g = 0, b = 0, a = 0, sum = 0; #pragma omp parallel for if (clrs > 25000) \ schedule(static) default(shared) reduction(+:a) reduction(+:r) reduction(+:g) reduction(+:b) reduction(+:sum) for(unsigned int i = 0; i < clrs; i++) { const f_pixel px = achv[i].acolor; const double weight = achv[i].adjusted_weight; sum += weight; a += px.a * weight; r += px.r * weight; g += px.g * weight; b += px.b * weight; } if (sum) { a /= sum; r /= sum; g /= sum; b /= sum; } assert(!isnan(r) && !isnan(g) && !isnan(b) && !isnan(a)); return (f_pixel){.r=r, .g=g, .b=b, .a=a}; }
render.h
#ifndef QUARTZ_RENDER_H #define QUARTZ_RENDER_H #include <quartz/camera.h> #include <quartz/image.h> #include <omp.h> namespace quartz { class renderer { private: const hittable &world; image_sink sink; public: renderer(image_size size, const hittable &w) : sink{image_sink(size)}, world{w} {} const image_sink &render_sink() const { return sink; } const image_sink &render(const ray_tracer &tracer, const camera &cam, int samples_per_pixel); }; const image_sink &renderer::render(const ray_tracer &tracer, const camera &cam, int samples_per_pixel) { #pragma omp parallel for default(none) shared(sink, samples_per_pixel, cam, tracer, world) for (int j = sink.size.image_height - 1; j >= 0; --j) { // std::cerr << "\rScanlines remaining: " << j << " " << std::flush; for (int i = 0; i < sink.size.image_width; i++) { quartz::color pixel_color(0, 0, 0); for (int s = 0; s < samples_per_pixel; s++) { auto u = noisy_scale_to_0_1(i, sink.size.image_width); auto v = noisy_scale_to_0_1(j, sink.size.image_height); quartz::ray r = cam.get_ray(u, v); pixel_color += tracer.trace(r, world); } pixel_color /= static_cast<double>(samples_per_pixel); int px = i, py = sink.size.image_height - j - 1; sink.write_pixel(pixel_color, px, py); } } std::cerr << "\nDone rendering to image sink.\n"; return render_sink(); } } // namespace quartz #endif
XSHA_fmt_plug.c
/* * This file is part of John the Ripper password cracker, * Copyright (c) 2008,2011 by Solar Designer * * Intrinsics support added by magnum 2011. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_XSHA; #elif FMT_REGISTERS_H john_register_one(&fmt_XSHA); #else #include <string.h> #include "arch.h" #ifdef MMX_COEF #define NBKEYS (MMX_COEF * SHA1_SSE_PARA) #ifdef _OPENMP static unsigned int omp_t = 1; #include <omp.h> #define OMP_SCALE 128 #endif #endif #include "sse-intrinsics.h" #include "params.h" #include "common.h" #include "formats.h" #include "sha.h" #include "johnswap.h" #include "memdbg.h" #define FORMAT_LABEL "xsha" #define FORMAT_NAME "Mac OS X 10.4 - 10.6" #define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 51 #define CIPHERTEXT_LENGTH 48 #define BINARY_SIZE 20 #define BINARY_ALIGN 4 #define SALT_SIZE 4 #define SALT_ALIGN 4 #ifdef MMX_COEF #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS #define GETPOS(i, index) ( (index&(MMX_COEF-1))*4 + ((i)&(0xffffffff-3))*MMX_COEF + (3-((i)&3)) + (index>>(MMX_COEF>>1))*SHA_BUF_SIZ*MMX_COEF*4 ) //for endianity conversion #else #define MIN_KEYS_PER_CRYPT 1 #ifdef _OPENMP #define MAX_KEYS_PER_CRYPT (0x200 * 3) #else #define MAX_KEYS_PER_CRYPT 0x100 #endif #endif static struct fmt_tests tests[] = { {"12345678F9083C7F66F46A0A102E4CC17EC08C8AF120571B", "abc"}, {"12345678EB8844BFAF2A8CBDD587A37EF8D4A290680D5818", "azertyuiop1"}, {"3234C32AAA335FD20E3F95870E5851BDBE942B79CE4FDD92", "azertyuiop2"}, {"01295B67659E95F32931CEDB3BA50289E2826AF3D5A1422F", "apple"}, {"0E6A48F765D0FFFFF6247FA80D748E615F91DD0C7431E4D9", "macintosh"}, {"A320163F1E6DB42C3949F7E232888ACC7DB7A0A17E493DBA", "test"}, {NULL} }; #ifdef MMX_COEF static ARCH_WORD_32 (*saved_key); static ARCH_WORD_32 (*crypt_key); static ARCH_WORD_32 cur_salt; #else static char saved_key[MAX_KEYS_PER_CRYPT][PLAINTEXT_LENGTH + 1]; static int saved_key_length[MAX_KEYS_PER_CRYPT]; 
static SHA_CTX ctx_salt; static ARCH_WORD_32 crypt_out[MAX_KEYS_PER_CRYPT][5]; #endif static void init(struct fmt_main *self) { #ifdef MMX_COEF #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt = omp_t * NBKEYS; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt = omp_t * NBKEYS; #endif saved_key = mem_calloc_tiny(SHA_BUF_SIZ*4 * self->params.max_keys_per_crypt, MEM_ALIGN_SIMD); crypt_key = mem_calloc_tiny(BINARY_SIZE * self->params.max_keys_per_crypt, MEM_ALIGN_SIMD); #endif } static int valid(char *ciphertext, struct fmt_main *self) { char *pos; /* Require uppercase hex digits (assume ASCII) */ pos = ciphertext; while (atoi16[ARCH_INDEX(*pos)] != 0x7F && *pos < 'a') pos++; return !*pos && pos - ciphertext == CIPHERTEXT_LENGTH; } static void *get_binary(char *ciphertext) { static unsigned char *out; char *p; int i; if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD); p = ciphertext + 8; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } #ifdef MMX_COEF alter_endianity(out, BINARY_SIZE); #endif return out; } static void *salt(char *ciphertext) { static unsigned int outbuf[SALT_SIZE / sizeof(int)]; unsigned char *out = (unsigned char*)outbuf; char *p; int i; p = ciphertext; for (i = 0; i < SALT_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } #ifdef MMX_COEF alter_endianity(out, SALT_SIZE); #endif return out; } #ifdef MMX_COEF static int get_hash_0(int index) { unsigned int x,y; x = index&3; y = index/4; return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*5] & 0xf; } static int get_hash_1(int index) { unsigned int x,y; x = index&3; y = index/4; return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*5] & 0xff; } static int get_hash_2(int index) { unsigned int x,y; x = index&3; y = index/4; return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*5] & 0xfff; } static int get_hash_3(int index) { unsigned int x,y; x = index&3; y = index/4; return 
((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*5] & 0xffff; } static int get_hash_4(int index) { unsigned int x,y; x = index&3; y = index/4; return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*5] & 0xfffff; } static int get_hash_5(int index) { unsigned int x,y; x = index&3; y = index/4; return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*5] & 0xffffff; } static int get_hash_6(int index) { unsigned int x,y; x = index&3; y = index/4; return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*5] & 0x7ffffff; } #else static int get_hash_0(int index) { return crypt_out[index][0] & 0xF; } static int get_hash_1(int index) { return crypt_out[index][0] & 0xFF; } static int get_hash_2(int index) { return crypt_out[index][0] & 0xFFF; } static int get_hash_3(int index) { return crypt_out[index][0] & 0xFFFF; } static int get_hash_4(int index) { return crypt_out[index][0] & 0xFFFFF; } static int get_hash_5(int index) { return crypt_out[index][0] & 0xFFFFFF; } static int get_hash_6(int index) { return crypt_out[index][0] & 0x7FFFFFF; } #endif static int salt_hash(void *salt) { return *(ARCH_WORD_32 *)salt & (SALT_HASH_SIZE - 1); } static void set_salt(void *salt) { #ifdef MMX_COEF cur_salt = *(ARCH_WORD_32*)salt; #else SHA1_Init(&ctx_salt); SHA1_Update(&ctx_salt, salt, SALT_SIZE); #endif } static void set_key(char *key, int index) { #ifdef MMX_COEF const ARCH_WORD_32 *wkey = (ARCH_WORD_32*)key; ARCH_WORD_32 *keybuffer = &saved_key[(index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*SHA_BUF_SIZ*MMX_COEF + MMX_COEF]; ARCH_WORD_32 *keybuf_word = keybuffer; unsigned int len; ARCH_WORD_32 temp; len = 4; while((temp = *wkey++) & 0xff) { if (!(temp & 0xff00)) { *keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 8)); len++; goto key_cleaning; } if (!(temp & 0xff0000)) { *keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80 << 16)); len+=2; goto key_cleaning; } if (!(temp & 0xff000000)) { *keybuf_word = JOHNSWAP(temp | (0x80 << 24)); len+=3; goto key_cleaning; } *keybuf_word = JOHNSWAP(temp); len += 4; keybuf_word += MMX_COEF; } 
*keybuf_word = 0x80000000; key_cleaning: keybuf_word += MMX_COEF; while(*keybuf_word) { *keybuf_word = 0; keybuf_word += MMX_COEF; } keybuffer[14*MMX_COEF] = len << 3; #else int length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; saved_key_length[index] = length; memcpy(saved_key[index], key, length); #endif } static char *get_key(int index) { #ifdef MMX_COEF unsigned int i,s; static char out[PLAINTEXT_LENGTH + 1]; s = ((unsigned int *)saved_key)[15*MMX_COEF + (index&3) + (index>>2)*SHA_BUF_SIZ*MMX_COEF] >> 3; for(i = 0; i < (s - SALT_SIZE); i++) out[i] = ((char*)saved_key)[ GETPOS((i + SALT_SIZE), index) ]; out[i] = 0; return (char *) out; #else saved_key[index][saved_key_length[index]] = 0; return saved_key[index]; #endif } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; #ifdef MMX_COEF int i = 0; #if defined(_OPENMP) #pragma omp parallel for for (i=0; i < omp_t; i++) { #endif unsigned int *in = &saved_key[i*NBKEYS*SHA_BUF_SIZ]; unsigned int *out = &crypt_key[i*NBKEYS*BINARY_SIZE/4]; unsigned int j; for (j=0; j < NBKEYS; j++) in[(j&(MMX_COEF-1)) + (j>>(MMX_COEF>>1))*SHA_BUF_SIZ*MMX_COEF] = cur_salt; SSESHA1body(in, out, NULL, SSEi_MIXED_IN); #if defined(_OPENMP) } #endif #else int i; #ifdef _OPENMP #pragma omp parallel for default(none) private(i) shared(ctx_salt, count, saved_key, saved_key_length, crypt_out) #endif for (i = 0; i < count; i++) { SHA_CTX ctx; memcpy(&ctx, &ctx_salt, sizeof(ctx)); SHA1_Update(&ctx, saved_key[i], saved_key_length[i]); SHA1_Final((unsigned char *)(crypt_out[i]), &ctx); } #endif return count; } static int cmp_all(void *binary, int count) { #ifdef MMX_COEF unsigned int x,y=0; #ifdef _OPENMP for(;y<SHA1_SSE_PARA*omp_t;y++) #else for(;y<SHA1_SSE_PARA;y++) #endif for(x=0;x<MMX_COEF;x++) { if( ((ARCH_WORD_32 *)binary)[0] == ((ARCH_WORD_32 *)crypt_key)[x+y*MMX_COEF*5] ) return 1; } return 0; #else ARCH_WORD_32 b0 = *(ARCH_WORD_32 *)binary; int i; for (i = 0; i < count; i++) { if 
(b0 != crypt_out[i][0]) continue; if (!memcmp(binary, crypt_out[i], BINARY_SIZE)) return 1; } return 0; #endif } static int cmp_one(void *binary, int index) { #ifdef MMX_COEF unsigned int x,y; x = index&3; y = index/4; if( ((ARCH_WORD_32 *)binary)[0] != ((ARCH_WORD_32 *)crypt_key)[x+y*MMX_COEF*5] ) return 0; if( ((ARCH_WORD_32 *)binary)[1] != ((ARCH_WORD_32 *)crypt_key)[x+y*MMX_COEF*5+MMX_COEF] ) return 0; if( ((ARCH_WORD_32 *)binary)[2] != ((ARCH_WORD_32 *)crypt_key)[x+y*MMX_COEF*5+2*MMX_COEF] ) return 0; if( ((ARCH_WORD_32 *)binary)[3] != ((ARCH_WORD_32 *)crypt_key)[x+y*MMX_COEF*5+3*MMX_COEF] ) return 0; if( ((ARCH_WORD_32 *)binary)[4] != ((ARCH_WORD_32 *)crypt_key)[x+y*MMX_COEF*5+4*MMX_COEF] ) return 0; return 1; #else return !memcmp(binary, crypt_out[index], BINARY_SIZE); #endif } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_XSHA = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_OMP | FMT_CASE | FMT_8_BIT, #if FMT_MAIN_VERSION > 11 { NULL }, #endif tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
GB_binop__min_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__min_int16) // A.*B function (eWiseMult): GB (_AemultB_08__min_int16) // A.*B function (eWiseMult): GB (_AemultB_02__min_int16) // A.*B function (eWiseMult): GB (_AemultB_04__min_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__min_int16) // A*D function (colscale): GB (_AxD__min_int16) // D*A function (rowscale): GB (_DxB__min_int16) // C+=B function (dense accum): GB (_Cdense_accumB__min_int16) // C+=b function (dense accum): GB (_Cdense_accumb__min_int16) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_int16) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_int16) // C=scalar+B GB (_bind1st__min_int16) // C=scalar+B' GB (_bind1st_tran__min_int16) // C=A+scalar GB (_bind2nd__min_int16) // C=A'+scalar GB (_bind2nd_tran__min_int16) // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = GB_IMIN (aij, bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C 
and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IMIN (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_INT16 || GxB_NO_MIN_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__min_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__min_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__min_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__min_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__min_int16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, 
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__min_int16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__min_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__min_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix 
A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__min_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__min_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__min_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__min_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) 
for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IMIN (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__min_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IMIN (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMIN (x, aij) ; \ } GrB_Info GB (_bind1st_tran__min_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMIN (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__min_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
omp_critical.c
<ompts:test>
<ompts:testdescription>Test which checks the omp critical directive by counting up a variable in a parallelized loop within a critical section.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp critical</ompts:directive>
<ompts:testcode>
#include <stdio.h>
#include <unistd.h>

#include "omp_testsuite.h"
#include "omp_my_sleep.h"

/* Validates #pragma omp critical: each thread accumulates a private partial
 * sum of its share of 0..999, then merges it into the shared total inside a
 * critical section.  Without the critical directive (the <ompts:check> tag is
 * stripped in the cross-check build) the unguarded read-modify-write on `sum`
 * races and the test is expected to fail. */
int <ompts:testcode:functionname>omp_critical</ompts:testcode:functionname> (FILE * logFile)
{
    <ompts:orphan:vars>
	int sum;    /* shared accumulator, updated under the critical section */
    </ompts:orphan:vars>
    sum=0;
    int known_sum;

    <ompts:orphan>
#pragma omp parallel
    {
	/* per-thread partial sum; private by virtue of block scope */
	int mysum=0;
	int i;

#pragma omp for
	for (i = 0; i < 1000; i++)
	    mysum = mysum + i;

	/* the construct under test: serializes the merge into `sum` */
	<ompts:check>#pragma omp critical</ompts:check>
	sum = mysum +sum;
    }	/* end of parallel */
    </ompts:orphan>
    printf("sum=%d\n",sum);
    /* closed form for 0+1+...+999 */
    known_sum = 999 * 1000 / 2;
    return (known_sum == sum);
}
</ompts:testcode>
</ompts:test>
sum_v2.c
/*
 * Assignment 2 (CSE436)
 * Kazumi Malhan
 * 06/08/2016
 *
 * Sum of A[N]
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/timeb.h>

/* omp_get_thread_num was previously called with no declaration (implicit
 * declaration — an error in modern C).  Declare it properly, with a serial
 * fallback so the file still compiles without -fopenmp. */
#ifdef _OPENMP
#include <omp.h>
#else
static int omp_get_thread_num(void) { return 0; }
#endif

/* read timer in second */
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

/* read timer in ms */
double read_timer_ms() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time * 1000.0 + (double) tm.millitm;
}

#define REAL float
#define VECTOR_LENGTH 102400

/* initialize a vector with random floating point numbers */
void init(REAL *A, int N) {
    int i;
    for (i = 0; i < N; i++) {
        A[i] = (double) drand48();
    }
}

/* Function Prototypes */
REAL sum (int N, REAL *A);
REAL sum_omp_parallel (int N, REAL *A, int num_tasks);
REAL sum_omp_parallel_for (int N, REAL *A, int num_tasks);

/*
 * To compile: gcc sum.c -fopenmp -o sum
 * To run: ./sum N num_tasks
 */
int main(int argc, char *argv[]) {
    int N = VECTOR_LENGTH;
    int num_tasks = 4;
    double elapsed_serial, elapsed_para, elapsed_para_for; /* for timing */
    if (argc < 3) {
        fprintf(stderr, "Usage: sum [<N(%d)>] [<#tasks(%d)>]\n", N,num_tasks);
        fprintf(stderr, "\t Example: ./sum %d %d\n", N,num_tasks);
    } else {
        N = atoi(argv[1]);
        num_tasks = atoi(argv[2]);
        /* Guard nonsensical input: num_tasks <= 0 would make the VLA in
         * sum_omp_parallel undefined behavior, N <= 0 a zero-byte malloc. */
        if (N <= 0) N = VECTOR_LENGTH;
        if (num_tasks <= 0) num_tasks = 1;
    }
    REAL *A = (REAL*)malloc(sizeof(REAL)*N);
    srand48((1 << 12));
    init(A, N);

    /* Serial Run */
    elapsed_serial = read_timer();
    REAL result = sum(N, A);
    elapsed_serial = (read_timer() - elapsed_serial);

    /* Parallel Run */
    elapsed_para = read_timer();
    result = sum_omp_parallel(N, A, num_tasks);
    elapsed_para = (read_timer() - elapsed_para);

    /* Parallel For Run */
    elapsed_para_for = read_timer();
    result = sum_omp_parallel_for(N, A, num_tasks);
    elapsed_para_for = (read_timer() - elapsed_para_for);

    /* you should add the call to each function and time the execution */
    printf("======================================================================================================\n");
    printf("\tSum %d numbers with %d tasks\n", N, num_tasks);
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("Performance:\t\tRuntime (ms)\t MFLOPS \n");
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("Sum Serial:\t\t\t%4f\t%4f\n", elapsed_serial * 1.0e3, 2*N / (1.0e6 * elapsed_serial));
    printf("Sum Parallel:\t\t\t%4f\t%4f\n", elapsed_para * 1.0e3, 2*N / (1.0e6 * elapsed_para));
    printf("Sum Parallel For:\t\t%4f\t%4f\n", elapsed_para_for * 1.0e3, 2*N / (1.0e6 * elapsed_para_for));
    free(A);
    return 0;
}

/* Serial Implemenration */
REAL sum(int N, REAL *A) {
    int i;
    REAL result = 0.0;
    for (i = 0; i < N; ++i)
        result += A[i];
    return result;
}

/* Parallel implementation with manual work partitioning: each of num_tasks
 * threads sums its own contiguous chunk; the first `leftover` threads each
 * additionally pick up one element from the tail. */
REAL sum_omp_parallel (int N, REAL *A, int num_tasks) {
    REAL result = 0.0;
    /* BUG FIX: partial sums are REAL (float); the previous `int` array
     * silently truncated every per-thread partial sum to an integer. */
    REAL partial_result[num_tasks];
    int t;
    /* Determine if task can be evenly distrubutable */
    int each_task = N / num_tasks;
    int leftover = N - (each_task * num_tasks);

    #pragma omp parallel shared (N, A, num_tasks, leftover, partial_result) num_threads(num_tasks)
    {
        int i, tid, istart, iend;
        /* BUG FIX: temp was read (temp += ...) before ever being written —
         * undefined behavior on an uninitialized automatic variable. */
        REAL temp = 0.0;
        tid = omp_get_thread_num();
        istart = tid * (N / num_tasks);
        iend = (tid + 1) * (N / num_tasks);
        for (i = istart; i < iend; ++i) {
            temp += A[i];
        }
        /* Take care left over: thread tid (< leftover) claims A[N-tid-1],
         * which together cover exactly the last `leftover` elements. */
        if (tid < leftover) {
            temp += A[N - tid - 1];
        }
        partial_result[tid] = temp;
    } // end of parallel

    /* Add result together */
    for(t=0; t<num_tasks; t++)
        result += partial_result[t];
    return result;
}

/* Parallel For Implemenration */
REAL sum_omp_parallel_for (int N, REAL *A, int num_tasks) {
    int i;
    REAL result = 0.0;
    # pragma omp parallel shared (N, A, result) private (i) num_threads(num_tasks)
    {
        # pragma omp for schedule(runtime) reduction (+:result) nowait
        for (i = 0; i < N; ++i) {
            result += A[i];
        }
    } // end of parallel
    return result;
}
gutoga.c
#include <float.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include "gutoga.h" struct gutoga_mutex_t *gutoga_mutex_new() { struct gutoga_mutex_t *lock; lock = calloc(1, sizeof(*lock)); if (lock == NULL) { return NULL; } omp_init_lock(&lock->l); return lock; } void gutoga_mutex_destroy(struct gutoga_mutex_t *lock) { omp_destroy_lock(&lock->l); free(lock); } void gutoga_mutex_lock(struct gutoga_mutex_t *lock) { omp_set_lock(&lock->l); } void gutoga_mutex_unlock(struct gutoga_mutex_t *lock) { omp_unset_lock(&lock->l); } int gutoga_cache_add(gutoga_cache_t *cache, BYTE *hash, void *ptr, double score, bool *exists) { *exists = false; if (cache == NULL) { return GUTOGA_SUCCESS; } gutoga_mutex_lock(cache->lock); struct gutoga_cache_entry_t *v = NULL; HASH_FIND(hh, cache->records, hash, SHA256_BLOCK_SIZE, v); *exists = v != NULL; if (v == NULL) { v = calloc(1, sizeof(*v)); if (v == NULL) { gutoga_mutex_unlock(cache->lock); return GUTOGA_ERR_BAD_ALLOC_CACHE; } v->ptr = ptr; v->refs = 1; v->score = score; memcpy(v->hash, (void *)hash, SHA256_BLOCK_SIZE); HASH_ADD(hh, cache->records, hash, SHA256_BLOCK_SIZE, v); } v->ttl = cache->ttl; gutoga_mutex_unlock(cache->lock); return GUTOGA_SUCCESS; } void gutoga_cache_get(gutoga_cache_t *cache, BYTE *hash, void **ptr, double *score) { if (cache == NULL) { return; } gutoga_mutex_lock(cache->lock); struct gutoga_cache_entry_t *v = NULL; HASH_FIND(hh, cache->records, hash, SHA256_BLOCK_SIZE, v); if (v == NULL) { *ptr = NULL; } else { *ptr = v->ptr; if (score != NULL) { *score = v->score; } } gutoga_mutex_unlock(cache->lock); } void gutoga_cache_ref(gutoga_cache_t *cache, BYTE *hash) { if (cache == NULL) { return; } gutoga_mutex_lock(cache->lock); struct gutoga_cache_entry_t *v = NULL; HASH_FIND(hh, cache->records, hash, SHA256_BLOCK_SIZE, v); if (v != NULL) { v->refs++; } gutoga_mutex_unlock(cache->lock); } void gutoga_cache_del(gutoga_cache_t *cache, BYTE *hash) { if (cache == NULL) 
{ return; } gutoga_mutex_lock(cache->lock); struct gutoga_cache_entry_t *v = NULL; HASH_FIND(hh, cache->records, hash, SHA256_BLOCK_SIZE, v); if (v != NULL) { HASH_DEL(cache->records, v); free(v); } gutoga_mutex_unlock(cache->lock); } void gutoga_cache_epoch(void *context, gutoga_cache_t *cache, gutoga_cleanup_fn cleanup) { if (cache == NULL) { return; } gutoga_mutex_lock(cache->lock); struct gutoga_cache_entry_t *v = NULL; struct gutoga_cache_entry_t *tmp = NULL; HASH_ITER(hh, cache->records, v, tmp) { if (v->refs == 0) { v->ttl--; } v->refs = 0; if (v->ttl == 0) { cleanup(context, v->ptr); HASH_DEL(cache->records, v); free(v); } } gutoga_mutex_unlock(cache->lock); } gutoga_cache_t *gutoga_cache_new(const int ttl) { gutoga_cache_t *cache; cache = calloc(1, sizeof(*cache)); if (cache == NULL) { return NULL; } cache->lock = gutoga_mutex_new(); if (cache->lock == NULL) { free(cache); cache = NULL; } cache->ttl = ttl; return cache; } void gutoga_cache_destroy(void *context, gutoga_cache_t *cache, gutoga_cleanup_fn cleanup) { if (cache == NULL) { return; } gutoga_mutex_destroy(cache->lock); struct gutoga_cache_entry_t *v = NULL; struct gutoga_cache_entry_t *tmp = NULL; HASH_ITER(hh, cache->records, v, tmp) { if (cleanup != NULL) { cleanup(context, v->ptr); } HASH_DEL(cache->records, v); free(v); } cache->records = NULL; free(cache); } void do_print(gutoga_node_t *node, char *padding, char *buf, int has_right_sibling) { printf("\n"); printf("%s", padding); printf("%s", buf); if (has_right_sibling) { printf("%d:%d:%d(l", node->id, node->left_edges, node->right_edges); printf(")"); } else { printf("%d:%d:%d(r", node->id, node->left_edges, node->right_edges); printf(")"); } char padding1[1024] = {0}; if (has_right_sibling == 1) { sprintf(padding1, "%s│ ", padding); } else { sprintf(padding1, "%s ", padding); } char *buf_left = "├──"; char *buf_right = "└──"; if (node->right == NULL) { buf_left = "└──"; } if (node->left != NULL) { do_print(node->left, padding1, buf_left, 
node->right != NULL); } if (node->right != NULL) { do_print(node->right, padding1, buf_right, 0); } } void printer(gutoga_tree_t t) { if (t.root == NULL) { return; } gutoga_node_t *root = t.root; printf("%d:(%d:%d)", root->id, root->left_edges, root->right_edges); char *buf_left = "├──"; char *buf_right = "└──"; if (root->right == NULL) { buf_left = "└──"; } if (root->left != NULL) { do_print(root->left, "", buf_left, root->right != NULL); } if (root->right != NULL) { do_print(root->right, "", buf_right, 0); } printf("\n"); } int gutoga_tree_split_edge(gutoga_tree_t *t, gutoga_node_t *from, gutoga_node_t *to, const int id) { gutoga_node_t *new_node = &t->nodes[t->nodes_len++]; new_node->id = -1; gutoga_node_t *new_leaf = &t->nodes[t->nodes_len++]; new_leaf->id = id; if (from->left == to) { from->left = new_node; from->left_edges += 2; } else { from->right = new_node; from->right_edges += 2; } new_node->left = to; new_node->right = new_leaf; new_node->right_edges = 1; new_node->left_edges = to->left_edges + to->right_edges + 1; return GUTOGA_SUCCESS; } int gutoga_tree_split(gutoga_tree_t *t, gutoga_node_t *node, const int edge, const int id) { if (edge < node->left_edges) { if (edge == node->left_edges - 1) { return gutoga_tree_split_edge(t, node, node->left, id); } else { node->left_edges += 2; return gutoga_tree_split(t, node->left, edge, id); } } else { if (edge == node->left_edges + node->right_edges - 1) { return gutoga_tree_split_edge(t, node, node->right, id); } else { node->right_edges += 2; return gutoga_tree_split(t, node->right, edge - node->left_edges, id); } } } int gutoga_tree_add_node(gutoga_tree_t *t, const int edge, const int id) { if (edge < t->root->left_edges + t->root->right_edges) { return gutoga_tree_split(t, t->root, edge, id); } gutoga_node_t *new_root = &t->nodes[t->nodes_len++]; gutoga_node_t *new_node = &t->nodes[t->nodes_len++]; new_root->id = -1; new_root->left = t->root; new_root->right = new_node; new_root->left_edges = 
t->root->left_edges + t->root->right_edges + 1; new_root->right_edges = 1; new_node->id = id; t->root = new_root; return GUTOGA_SUCCESS; } static int gutoga_tree_num_nodes(const int size) { return 2 * size - 1; } int gutoga_tree_init_empty(gutoga_tree_t *t, const int size) { t->root = NULL; t->has_score = false; t->score = 0; t->size = size; int nodes = gutoga_tree_num_nodes(size); if (nodes <= 3) { return GUTOGA_ERR_BAD_TREE_SIZE; } t->enc = calloc(size - 2, sizeof(*t->enc)); if (t->enc == NULL) { return GUTOGA_ERR_BAD_ALLOC_TREE; } t->nodes = calloc(nodes, sizeof(*t->nodes)); if (t->nodes == NULL) { return GUTOGA_ERR_BAD_ALLOC_TREE; } return GUTOGA_SUCCESS; } void gutoga_tree_default(gutoga_tree_t *t) { /* -1 */ /* / \ */ /* / \ */ /* 0 1 */ gutoga_node_t root = { .id = -1, .left = t->nodes + 1, .right = t->nodes + 2, .left_edges = 1, .right_edges = 1, }; t->root = t->nodes; t->nodes[0] = root; t->nodes[1].id = 0; t->nodes[2].id = 1; t->nodes_len = 3; t->has_score = false; } int gutoga_tree_init(gutoga_tree_t *t, const int size) { int rc = gutoga_tree_init_empty(t, size); if (rc != GUTOGA_SUCCESS) { return rc; } gutoga_tree_default(t); return GUTOGA_SUCCESS; } static int gutoga_tree_sequence(gutoga_tree_t *t) { for (int i = 0; i < t->size - 2; i++) { int rc = gutoga_tree_add_node(t, t->enc[i], i + 2); if (rc != 0) { return rc; } } return GUTOGA_SUCCESS; } int gutoga_tree_random(void *context, gutoga_tree_t *t, const int size, gutoga_random_int_fn rnd) { int rc = gutoga_tree_init(t, size); if (rc != GUTOGA_SUCCESS) { return rc; } for (int i = 0; i < size - 2; i++) { t->enc[i] = rnd(context, 2 * i + 3); } return gutoga_tree_sequence(t); } int gutoga_tree_init_sequence(void *context, gutoga_tree_t *t, const int *seq, const int size) { int rc = gutoga_tree_init(t, size); if (rc != GUTOGA_SUCCESS) { return rc; } memmove(t->enc, seq, (size - 2) * sizeof(*seq)); return gutoga_tree_sequence(t); } int gutoga_tree_clone(void *context, gutoga_tree_t *dst, gutoga_tree_t src) 
{ int rc = gutoga_tree_init_sequence(context, dst, src.enc, src.size); if (rc != GUTOGA_SUCCESS) { return rc; } dst->has_score = src.has_score; dst->score = src.score; memcpy(dst->root->hash, src.root->hash, SHA256_BLOCK_SIZE); return GUTOGA_SUCCESS; } void gutoga_tree_destroy(void *context, gutoga_ga_t *ga, gutoga_tree_t t) { if (t.nodes != NULL) { free(t.nodes); } if (t.enc != NULL) { free(t.enc); } } int gutoga_node_align(void *context, gutoga_ga_t *ga, gutoga_node_t *node, void **data, double *score) { if (node->id != -1) { return ga->cfg->id_to_ptr(context, node->id, data); } if ((node->left == NULL) ^ (node->right == NULL)) { // By definition all internal nodes have exactly two children. return GUTOGA_ERR_BAD_TREE; } gutoga_cache_get(ga->cache, node->hash, data, score); if (*data != NULL) { return GUTOGA_SUCCESS; } void *left = NULL; void *right = NULL; int rc = gutoga_node_align(context, ga, node->left, &left, NULL); if (rc != GUTOGA_SUCCESS) { return rc; } rc = gutoga_node_align(context, ga, node->right, &right, NULL); if (rc != GUTOGA_SUCCESS) { return rc; } rc = ga->cfg->align(context, left, right, data, score); double sc = 0; if (score != NULL) { sc = *score; } bool exists = false; gutoga_cache_add(ga->cache, node->hash, *data, sc, &exists); if (exists) { ga->cfg->cleanup(context, *data); gutoga_cache_get(ga->cache, node->hash, data, score); } return rc; } void gutoga_node_hash(gutoga_ga_t *ga, gutoga_node_t *node) { SHA256_CTX ctx; sha256_init(&ctx); if (node->id != -1) { sha256_update(&ctx, (BYTE *)&(node->id), sizeof(node->id)); sha256_final(&ctx, node->hash); } else { gutoga_node_hash(ga, node->left); gutoga_node_hash(ga, node->right); sha256_update(&ctx, node->left->hash, SHA256_BLOCK_SIZE); sha256_update(&ctx, node->right->hash, SHA256_BLOCK_SIZE); sha256_final(&ctx, node->hash); } /* printf("hash: %02X %02X %02X\n", node->hash[0], node->hash[1], node->hash[2]); */ gutoga_cache_ref(ga->cache, node->hash); } struct tree_order_retriever_t { int 
*order; }; void gutoga_tree_traverse(const gutoga_node_t *node, struct tree_order_retriever_t *dst) { if (node->left != NULL) { gutoga_tree_traverse(node->left, dst); } if (node->id != -1) { *dst->order = node->id; dst->order++; } if (node->right != NULL) { gutoga_tree_traverse(node->right, dst); } } int *gutoga_tree_order(const gutoga_tree_t t) { int *dst = calloc(t.size, sizeof(int)); if (dst == NULL) { return NULL; } struct tree_order_retriever_t r = { .order = dst, }; gutoga_tree_traverse(t.root, &r); return dst; } gutoga_population_t *gutoga_population_new(int size) { gutoga_population_t *p = calloc(1, sizeof(struct gutoga_population_t)); if (p == NULL) { return NULL; } p->size = 0; p->cap = size * 2; p->trees = calloc(p->cap, sizeof(*p->trees)); if (p->trees == NULL) { free(p); return NULL; } return p; } void gutoga_population_destroy(void *context, gutoga_ga_t *ga, gutoga_population_t *p) { for (int i = 0; i < p->size; i++) { gutoga_tree_destroy(context, ga, p->trees[i]); } if (p->trees != NULL) { free(p->trees); p->trees = NULL; } free(p); } void gutoga_population_reset(void *context, gutoga_ga_t *ga, gutoga_population_t *p) { for (int i = 0; i < p->size; i++) { gutoga_tree_destroy(context, ga, p->trees[i]); } p->size = 0; } int gutoga_population_append(struct gutoga_population_t *p, const gutoga_tree_t t) { if (p->size >= p->cap) { p->cap *= 2; gutoga_tree_t *m = realloc(p->trees, p->cap * sizeof(*p->trees)); if (m == NULL) { return GUTOGA_ERR_BAD_ALLOC_POPULATION; } p->trees = m; } p->trees[p->size] = t; p->size++; return GUTOGA_SUCCESS; } int gutoga_tree_score(void *context, gutoga_ga_t *ga, gutoga_tree_t *tree, const bool force, double *fitness) { int rc = GUTOGA_SUCCESS; if (!tree->has_score) { void *dst; rc = gutoga_node_align(context, ga, tree->root, &dst, &tree->score); if (rc == GUTOGA_ERR_FITNESS_SKIPPABLE) { tree->score = 0; } } tree->has_score = true; if (fitness != NULL) { *fitness = tree->score; } return rc; } int gutoga_tree_fitness(void 
*context, gutoga_ga_t *ga, gutoga_tree_t *tree, double *fitness) { return gutoga_tree_score(context, ga, tree, false, fitness); } int gutoga_tree_force_fitness(void *context, gutoga_ga_t *ga, gutoga_tree_t *tree, void **dst) { int rc = gutoga_node_align(context, ga, tree->root, dst, NULL); gutoga_cache_del(ga->cache, tree->root->hash); return rc; } static int points_bin_search(const double *points, const int size, const double point) { int l = 0; int r = size; while (l < r) { int h = (l + r) >> 1; if (points[h] < point) { l = h + 1; } else { r = h; } } return l; } int gutoga_selection_roulette_wheel(void *context, gutoga_ga_t *ga, gutoga_stats_t *stats) { double start = omp_get_wtime(); gutoga_population_reset(context, ga, ga->mates); double *points = calloc(ga->pop->size, sizeof(double)); if (points == NULL) { return GUTOGA_ERR_BAD_ALLOC_SELECTION; } int rc; double sum = 0.0; for (int i = 0; i < ga->pop->size; i++) { double fitness = 0.0; rc = gutoga_tree_fitness(context, ga, ga->pop->trees + i, &fitness); if (rc != GUTOGA_SUCCESS && rc != GUTOGA_ERR_FITNESS_SKIPPABLE) { goto RW_SELECTION_ERROR; } sum += fitness; points[i] = sum; } for (int i = 0; i < ga->pop->size; i++) { double rnd = ga->cfg->rand_double(context) * sum; int ind = points_bin_search(points, ga->pop->size, rnd); gutoga_tree_t tree; rc = gutoga_tree_clone(context, &tree, ga->pop->trees[ind]); if (rc != GUTOGA_SUCCESS) { goto RW_SELECTION_ERROR; } rc = gutoga_population_append(ga->mates, tree); if (rc != GUTOGA_SUCCESS) { goto RW_SELECTION_ERROR; } } rc = GUTOGA_SUCCESS; RW_SELECTION_ERROR: free(points); ga->cfg->logger(context, "selection elapsed: %f; (min=%.5lf;max=%.5lf;avg=%.5lf;best=%.5lf)\n", omp_get_wtime() - start, stats->min, stats->max, stats->avg, stats->best.score); return rc; } void gutoga_population_shuffle(void *context, gutoga_population_t *pop, gutoga_random_int_fn rnd) { for (int i = pop->size - 1; i > 0; i--) { size_t j = rnd(context, i + 1); gutoga_tree_t t = pop->trees[j]; 
pop->trees[j] = pop->trees[i]; pop->trees[i] = t; } } int gutoga_crossover_one_point(void *context, const gutoga_tree_t parent_a, const gutoga_tree_t parent_b, gutoga_tree_t *child_a, gutoga_tree_t *child_b, gutoga_random_int_fn rnd) { int rc = gutoga_tree_init(child_a, parent_a.size); if (rc != GUTOGA_SUCCESS) { return rc; } rc = gutoga_tree_init(child_b, parent_a.size); if (rc != GUTOGA_SUCCESS) { return rc; } int point = rnd(context, parent_a.size - 2); for (int i = 0; i < point; i++) { child_a->enc[i] = parent_a.enc[i]; child_b->enc[i] = parent_b.enc[i]; } for (int i = point; i < parent_a.size - 2; i++) { child_a->enc[i] = parent_b.enc[i]; child_b->enc[i] = parent_a.enc[i]; } rc = gutoga_tree_sequence(child_a); if (rc != GUTOGA_SUCCESS) { return rc; } return gutoga_tree_sequence(child_b); } int gutoga_mutation_adaptive_prob(void *context, const gutoga_tree_t t, gutoga_stats_t *stats, double *prob) { double cur = t.score; if (cur < stats->avg) { *prob = 0.5; } else { *prob = 0.5 * ((stats->max - cur) / (stats->max - stats->avg)); } return GUTOGA_SUCCESS; } int gutoga_crossover_adaptive_prob(void *context, const gutoga_tree_t a, const gutoga_tree_t b, gutoga_stats_t *stats, double *prob) { double cur = a.score; if (b.score > cur) { cur = b.score; } if (cur < stats->avg) { *prob = 1; } else { *prob = ((stats->max - cur) / (stats->max - stats->avg)); } return GUTOGA_SUCCESS; } int gutoga_mutation_flip(void *context, gutoga_tree_t *t, gutoga_random_int_fn rnd) { int pos = rnd(context, t->size - 2); memset(t->nodes, 0, gutoga_tree_num_nodes(t->size) * sizeof(*t->nodes)); gutoga_tree_default(t); t->enc[pos] = rnd(context, 2 * pos + 3); return gutoga_tree_sequence(t); } int gutoga_population_crossover(void *context, gutoga_ga_t *ga, gutoga_stats_t *stats) { double start = omp_get_wtime(); gutoga_population_reset(context, ga, ga->pop); gutoga_population_shuffle(context, ga->mates, ga->cfg->rand_int); for (int i = 0; i < ga->mates->size / 2; i++) { gutoga_tree_t parent_a 
= ga->mates->trees[2 * i]; gutoga_tree_t parent_b = ga->mates->trees[2 * i + 1]; gutoga_tree_t child_a; gutoga_tree_t child_b; double prob = 0.0; int rc = ga->cfg->crossover_prob(context, parent_a, parent_b, stats, &prob); if (rc != 0) { return rc; } rc = ga->cfg->crossover(context, parent_a, parent_b, &child_a, &child_b, ga->cfg->rand_int); if (rc != 0) { return rc; } rc = gutoga_population_append(ga->pop, child_a); if (rc != 0) { return rc; } rc = gutoga_population_append(ga->pop, child_b); if (rc != 0) { return rc; } } if (ga->mates->size % 2 == 1) { gutoga_tree_t tree; int rc = gutoga_tree_clone(context, &tree, ga->mates->trees[ga->mates->size - 1]); if (rc != GUTOGA_SUCCESS) { return rc; } rc = gutoga_population_append(ga->mates, tree); if (rc != GUTOGA_SUCCESS) { return rc; } } ga->cfg->logger(context, "crossover elapsed: %f; (min=%.5lf;max=%.5lf;avg=%.5lf;best=%.5lf)\n", omp_get_wtime() - start, stats->min, stats->max, stats->avg, stats->best.score); return GUTOGA_SUCCESS; } int gutoga_population_mutation(void *context, gutoga_ga_t *ga, gutoga_stats_t *stats) { double start = omp_get_wtime(); for (int i = 0; i < ga->pop->size; i++) { double prob = 0.0; int rc = ga->cfg->mutation_prob(context, ga->pop->trees[i], stats, &prob); if (rc != GUTOGA_SUCCESS) { return rc; } if (ga->cfg->rand_double(context) <= prob) { continue; } rc = ga->cfg->mutation(context, ga->pop->trees + i, ga->cfg->rand_int); if (rc != GUTOGA_SUCCESS) { return rc; } ga->pop->trees[i].has_score = false; ga->pop->trees[i].score = 0; } ga->cfg->logger(context, "mutation elapsed: %f; (min=%.5lf;max=%.5lf;avg=%.5lf;best=%.5lf)\n", omp_get_wtime() - start, stats->min, stats->max, stats->avg, stats->best.score); return GUTOGA_SUCCESS; } int gutoga_population_fitness(void *context, gutoga_ga_t *ga, gutoga_stats_t *stats) { double start = omp_get_wtime(); #pragma omp parallel { omp_set_num_threads(ga->cfg->nthreads); int failed = 0; #pragma omp for schedule(dynamic) for (int i = 0; i < ga->pop->size; 
i++) { int rc = GUTOGA_SUCCESS; #pragma omp atomic read rc = failed; if (rc != GUTOGA_SUCCESS && rc != GUTOGA_ERR_FITNESS_SKIPPABLE) { continue; } gutoga_node_hash(ga, ga->pop->trees[i].root); rc = gutoga_tree_fitness(context, ga, ga->pop->trees + i, NULL); if (rc != GUTOGA_SUCCESS && rc != GUTOGA_ERR_FITNESS_SKIPPABLE) { #pragma omp atomic write failed = rc; } } } double sum = 0.0; int max_ind = -1; for (int i = 0; i < ga->pop->size; i++) { double fitness; int rc = gutoga_tree_fitness(context, ga, ga->pop->trees + i, &fitness); if (rc != GUTOGA_SUCCESS && rc != GUTOGA_ERR_FITNESS_SKIPPABLE) { return rc; } if (fitness < stats->min) { stats->min = fitness; } if (fitness > stats->max) { stats->max = fitness; max_ind = i; } sum += fitness; } stats->avg = sum / ga->pop->size; if (stats->best.size == 0) { int rc = gutoga_tree_clone(context, &stats->best, ga->pop->trees[max_ind]); ga->cfg->logger(context, "fitness elapsed: %f; (min=%.5lf;max=%.5lf;avg=%.5lf;best=%.5lf)\n", omp_get_wtime() - start, stats->min, stats->max, stats->avg, stats->best.score); return rc; } if (stats->best.score < stats->max) { gutoga_tree_destroy(context, ga, stats->best); int rc = gutoga_tree_clone(context, &stats->best, ga->pop->trees[max_ind]); if (rc != GUTOGA_SUCCESS) { return rc; } } ga->cfg->logger(context, "fitness elapsed: %f; (min=%.5lf;max=%.5lf;avg=%.5lf;best=%.5lf)\n", omp_get_wtime() - start, stats->min, stats->max, stats->avg, stats->best.score); return GUTOGA_SUCCESS; } int gutoga_run_iteration(void *context, gutoga_ga_t *ga, gutoga_stats_t *stats) { for (gutoga_iteration_step_fn *step = ga->cfg->iteration_steps; *step != NULL; step++) { int rc = (*step)(context, ga, stats); if (rc != GUTOGA_SUCCESS) { return rc; } } return GUTOGA_SUCCESS; } int gutoga_population_init(void *context, gutoga_ga_t *ga) { for (int i = 0; i < ga->cfg->population_size; i++) { gutoga_tree_t t; int rc = gutoga_tree_random(context, &t, ga->cfg->tree_size, ga->cfg->rand_int); if (rc != GUTOGA_SUCCESS) { 
return rc; } rc = gutoga_population_append(ga->pop, t); if (rc != GUTOGA_SUCCESS) { return rc; } } return GUTOGA_SUCCESS; } int gutoga_cfg_verify(gutoga_cfg_t *cfg) { if (cfg->population_size < 2) { return GUTOGA_ERR_CFG_POPULATION_SIZE; } if (cfg->tree_size < 3) { return GUTOGA_ERR_CFG_TREE_SIZE; } if (cfg->nthreads < 1) { return GUTOGA_ERR_CFG_NTHREADS; } if (cfg->rand_int == NULL) { return GUTOGA_ERR_CFG_RAND_INT; } if (cfg->rand_double == NULL) { return GUTOGA_ERR_CFG_RAND_DOUBLE; } if (cfg->mutation_prob == NULL) { return GUTOGA_ERR_CFG_MUTATION_PROB; } if (cfg->mutation == NULL) { return GUTOGA_ERR_CFG_MUTATION; } if (cfg->crossover_prob == NULL) { return GUTOGA_ERR_CFG_CROSSOVER_PROB; } if (cfg->crossover == NULL) { return GUTOGA_ERR_CFG_CROSSOVER; } if (cfg->id_to_ptr == NULL) { return GUTOGA_ERR_CFG_ID_TO_PTR; } if (cfg->align == NULL) { return GUTOGA_ERR_CFG_ALIGN; } if (cfg->cleanup == NULL) { return GUTOGA_ERR_CFG_CLEANUP; } if (cfg->population_init == NULL) { return GUTOGA_ERR_CFG_POPULATION_INIT; } if (cfg->iteration_steps == NULL) { return GUTOGA_ERR_CFG_ITERATION_STEPS; } if (cfg->termination_criteria == NULL) { return GUTOGA_ERR_CFG_TERMINATION_CRITERIA; } if (cfg->logger == NULL) { return GUTOGA_ERR_CFG_LOGGER; } if (cfg->cache_ttl < 1) { return GUTOGA_ERR_CFG_CACHE_TTL; } return GUTOGA_SUCCESS; } int gutoga_ga_init(gutoga_ga_t *ga, gutoga_cfg_t *cfg) { int rc = gutoga_cfg_verify(cfg); if (rc != GUTOGA_SUCCESS) { return rc; } ga->cfg = cfg; ga->pop = gutoga_population_new(cfg->population_size); if (ga->pop == NULL) { return GUTOGA_ERR_BAD_ALLOC_POPULATION; } ga->mates = gutoga_population_new(cfg->population_size); if (ga->mates == NULL) { return GUTOGA_ERR_BAD_ALLOC_POPULATION; } ga->cache = gutoga_cache_new(cfg->cache_ttl); if (ga->cache == NULL) { return GUTOGA_ERR_BAD_ALLOC_CACHE; } return GUTOGA_SUCCESS; } void gutoga_ga_destroy(void *context, gutoga_ga_t *ga) { gutoga_cache_destroy(context, ga->cache, ga->cfg->cleanup); 
gutoga_population_destroy(context, ga, ga->pop); gutoga_population_destroy(context, ga, ga->mates); } static double time_now() { struct timespec tp; clock_gettime(CLOCK_MONOTONIC, &tp); return (double)tp.tv_sec + (double)tp.tv_nsec / 1000000000; } int gutoga_run_ga(void *context, gutoga_ga_t *ga, void **best) { double ga_start = time_now(); int rc = ga->cfg->population_init(context, ga); if (rc != GUTOGA_SUCCESS) { return rc; } gutoga_stats_t stats = {0}; double elapsed = 0.0; bool terminate = false; double best_score = -1; for (stats.iteration = 0; rc == GUTOGA_SUCCESS && !terminate; stats.iteration++) { double start = time_now(); stats.min = DBL_MAX; stats.max = -1; rc = gutoga_run_iteration(context, ga, &stats); elapsed = time_now() - start; if (rc != GUTOGA_SUCCESS) { break; } stats.elapsed = time_now() - ga_start; if (best_score >= stats.max) { stats.unchanged++; } else { best_score = stats.max; stats.unchanged = 0; } ga->cfg->logger(context, "ga: iteration %u: took %.5lfs (min=%.5lf;max=%.5lf;avg=%.5lf;best=%.5lf;unchanged=%d;elapsed=%.5lfs)\n", stats.iteration, elapsed, stats.min, stats.max, stats.avg, stats.best.score, stats.unchanged, stats.elapsed); rc = ga->cfg->termination_criteria(context, ga, &stats, &terminate); gutoga_cache_epoch(context, ga->cache, ga->cfg->cleanup); } if (rc != GUTOGA_SUCCESS) { ga->cfg->logger(context, "ga: iteration %u: elapsed %.5lfs (fail, code: %d)\n", stats.iteration, elapsed, rc); return rc; } ga->cfg->logger(context, "ga: iteration %u: took %.5lfs (min=%.5lf;max=%.5lf;avg=%.5lf;best=%.5lf;unchanged=%d;elapsed=%.5lfs)\n", stats.iteration, elapsed, stats.min, stats.max, stats.avg, stats.best.score, stats.unchanged, stats.elapsed); rc = gutoga_tree_force_fitness(context, ga, &stats.best, best); gutoga_tree_destroy(context, ga, stats.best); return rc; }
GB_unop__identity_int64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int64_fc64) // op(A') function: GB (_unop_tran__identity_int64_fc64) // C type: int64_t // A type: GxB_FC64_t // cast: int64_t cij = GB_cast_to_int64_t (creal (aij)) // unaryop: cij = aij #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int64_t z = GB_cast_to_int64_t (creal (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int64_t z = GB_cast_to_int64_t (creal (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int64_fc64) ( int64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; int64_t z = GB_cast_to_int64_t (creal (aij)) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; int64_t z = GB_cast_to_int64_t (creal (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int64_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
analyze.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % AAA N N AAA L Y Y ZZZZZ EEEEE % % A A NN N A A L Y Y ZZ E % % AAAAA N N N AAAAA L Y ZZZ EEE % % A A N NN A A L Y ZZ E % % A A N N A A LLLLL Y ZZZZZ EEEEE % % % % Analyze An Image % % % % Software Design % % Bill Corbis % % December 1998 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % */ /* Include declarations. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <assert.h> #include <math.h> #include "magick/MagickCore.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % a n a l y z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % analyzeImage() computes the brightness and saturation mean, standard % deviation, kurtosis and skewness and stores these values as attributes % of the image. % % The format of the analyzeImage method is: % % size_t analyzeImage(Image *images,const int argc, % char **argv,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the address of a structure of type Image. % % o argc: Specifies a pointer to an integer describing the number of % elements in the argument vector. 
% % o argv: Specifies a pointer to a text array containing the command line % arguments. % % o exception: return any errors or warnings in this structure. % */ ModuleExport size_t analyzeImage(Image **images,const int argc, const char **argv,ExceptionInfo *exception) { char text[MaxTextExtent]; double area, brightness, brightness_mean, brightness_standard_deviation, brightness_kurtosis, brightness_skewness, brightness_sum_x, brightness_sum_x2, brightness_sum_x3, brightness_sum_x4, hue, saturation, saturation_mean, saturation_standard_deviation, saturation_kurtosis, saturation_skewness, saturation_sum_x, saturation_sum_x2, saturation_sum_x3, saturation_sum_x4; Image *image; assert(images != (Image **) NULL); assert(*images != (Image *) NULL); assert((*images)->signature == MagickCoreSignature); (void) argc; (void) argv; image=(*images); for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) { CacheView *image_view; MagickBooleanType status; ssize_t y; brightness_sum_x=0.0; brightness_sum_x2=0.0; brightness_sum_x3=0.0; brightness_sum_x4=0.0; brightness_mean=0.0; brightness_standard_deviation=0.0; brightness_kurtosis=0.0; brightness_skewness=0.0; saturation_sum_x=0.0; saturation_sum_x2=0.0; saturation_sum_x3=0.0; saturation_sum_x4=0.0; saturation_mean=0.0; saturation_standard_deviation=0.0; saturation_kurtosis=0.0; saturation_skewness=0.0; area=0.0; status=MagickTrue; image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ConvertRGBToHSB(GetPixelRed(p),GetPixelGreen(p), GetPixelBlue(p),&hue,&saturation,&brightness); brightness*=QuantumRange; 
brightness_sum_x+=brightness; brightness_sum_x2+=brightness*brightness; brightness_sum_x3+=brightness*brightness*brightness; brightness_sum_x4+=brightness*brightness*brightness*brightness; saturation*=QuantumRange; saturation_sum_x+=saturation; saturation_sum_x2+=saturation*saturation; saturation_sum_x3+=saturation*saturation*saturation; saturation_sum_x4+=saturation*saturation*saturation*saturation; area++; p++; } } image_view=DestroyCacheView(image_view); if (area <= 0.0) break; brightness_mean=brightness_sum_x/area; (void) FormatMagickString(text,MaxTextExtent,"%g",brightness_mean); (void) SetImageProperty(image,"filter:brightness:mean",text); brightness_standard_deviation=sqrt(brightness_sum_x2/area-(brightness_sum_x/ area*brightness_sum_x/area)); (void) FormatMagickString(text,MaxTextExtent,"%g", brightness_standard_deviation); (void) SetImageProperty(image,"filter:brightness:standard-deviation",text); if (brightness_standard_deviation != 0) brightness_kurtosis=(brightness_sum_x4/area-4.0*brightness_mean* brightness_sum_x3/area+6.0*brightness_mean*brightness_mean* brightness_sum_x2/area-3.0*brightness_mean*brightness_mean* brightness_mean*brightness_mean)/(brightness_standard_deviation* brightness_standard_deviation*brightness_standard_deviation* brightness_standard_deviation)-3.0; (void) FormatMagickString(text,MaxTextExtent,"%g",brightness_kurtosis); (void) SetImageProperty(image,"filter:brightness:kurtosis",text); if (brightness_standard_deviation != 0) brightness_skewness=(brightness_sum_x3/area-3.0*brightness_mean* brightness_sum_x2/area+2.0*brightness_mean*brightness_mean* brightness_mean)/(brightness_standard_deviation* brightness_standard_deviation*brightness_standard_deviation); (void) FormatMagickString(text,MaxTextExtent,"%g",brightness_skewness); (void) SetImageProperty(image,"filter:brightness:skewness",text); saturation_mean=saturation_sum_x/area; (void) FormatMagickString(text,MaxTextExtent,"%g",saturation_mean); (void) 
SetImageProperty(image,"filter:saturation:mean",text); saturation_standard_deviation=sqrt(saturation_sum_x2/area-(saturation_sum_x/ area*saturation_sum_x/area)); (void) FormatMagickString(text,MaxTextExtent,"%g", saturation_standard_deviation); (void) SetImageProperty(image,"filter:saturation:standard-deviation",text); if (saturation_standard_deviation != 0) saturation_kurtosis=(saturation_sum_x4/area-4.0*saturation_mean* saturation_sum_x3/area+6.0*saturation_mean*saturation_mean* saturation_sum_x2/area-3.0*saturation_mean*saturation_mean* saturation_mean*saturation_mean)/(saturation_standard_deviation* saturation_standard_deviation*saturation_standard_deviation* saturation_standard_deviation)-3.0; (void) FormatMagickString(text,MaxTextExtent,"%g",saturation_kurtosis); (void) SetImageProperty(image,"filter:saturation:kurtosis",text); if (saturation_standard_deviation != 0) saturation_skewness=(saturation_sum_x3/area-3.0*saturation_mean* saturation_sum_x2/area+2.0*saturation_mean*saturation_mean* saturation_mean)/(saturation_standard_deviation* saturation_standard_deviation*saturation_standard_deviation); (void) FormatMagickString(text,MaxTextExtent,"%g",saturation_skewness); (void) SetImageProperty(image,"filter:saturation:skewness",text); } return(MagickImageFilterSignature); }
kmp_set_dispatch_buf.c
// RUN: %libomp-compile // RUN: env KMP_DISP_NUM_BUFFERS=0 %libomp-run // RUN: env KMP_DISP_NUM_BUFFERS=1 %libomp-run // RUN: env KMP_DISP_NUM_BUFFERS=3 %libomp-run // RUN: env KMP_DISP_NUM_BUFFERS=4 %libomp-run // RUN: env KMP_DISP_NUM_BUFFERS=7 %libomp-run // RUN: %libomp-compile -DMY_SCHEDULE=guided // RUN: env KMP_DISP_NUM_BUFFERS=1 %libomp-run // RUN: env KMP_DISP_NUM_BUFFERS=3 %libomp-run // RUN: env KMP_DISP_NUM_BUFFERS=4 %libomp-run // RUN: env KMP_DISP_NUM_BUFFERS=7 %libomp-run // UNSUPPORTED: clang-11, clang-12 #include <stdio.h> #include <omp.h> #include <stdlib.h> #include <limits.h> #include "omp_testsuite.h" #define INCR 7 #define MY_MAX 200 #define MY_MIN -200 #define NUM_LOOPS 100 #ifndef MY_SCHEDULE # define MY_SCHEDULE dynamic #endif int a, b, a_known_value, b_known_value; int test_kmp_set_disp_num_buffers() { int success = 1; a = 0; b = 0; // run many small dynamic loops to stress the dispatch buffer system #pragma omp parallel { int i,j; for (j = 0; j < NUM_LOOPS; j++) { #pragma omp for schedule(MY_SCHEDULE) nowait for (i = MY_MIN; i < MY_MAX; i+=INCR) { #pragma omp atomic a++; } #pragma omp for schedule(MY_SCHEDULE) nowait for (i = MY_MAX; i >= MY_MIN; i-=INCR) { #pragma omp atomic b++; } } } // detect failure if (a != a_known_value || b != b_known_value) { success = 0; printf("a = %d (should be %d), b = %d (should be %d)\n", a, a_known_value, b, b_known_value); } return success; } int main(int argc, char** argv) { int i,j; int num_failed=0; // figure out the known values to compare with calculated result a_known_value = 0; b_known_value = 0; for (j = 0; j < NUM_LOOPS; j++) { for (i = MY_MIN; i < MY_MAX; i+=INCR) a_known_value++; for (i = MY_MAX; i >= MY_MIN; i-=INCR) b_known_value++; } for(i = 0; i < REPETITIONS; i++) { if(!test_kmp_set_disp_num_buffers()) { num_failed++; } } if (num_failed == 0) printf("passed\n"); else printf("failed %d\n", num_failed); return num_failed; }
kernel_nussinov.c
/*
 * Nussinov RNA secondary-structure dynamic-programming kernel
 * (appears to be the PolyBench `nussinov` benchmark — TODO confirm),
 * auto-tiled and parallelized by a polyhedral tool.  The loop nest
 * between the "Start/End of CLooG code" markers is machine-generated:
 * the t2/t4/t5/t7/t9 bound expressions encode a 32x32 tiling of the
 * triangular iteration space, and each tiled statement is the max-fold
 * of the classic recurrence
 *     table[i][j] = max(table[i][j-1], table[i+1][j],
 *                       table[i+1][j-1] + match(seq[i],seq[j]),
 *                       max_k(table[i][k] + table[k+1][j]))
 * Do not hand-edit the generated bounds; regenerate instead.
 */
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

typedef char base;

/*
 * n      : sequence length (table is n x n).
 * seq    : input base sequence; a pair (seq[i], seq[j]) scores 1 when
 *          the two codes sum to 3 (see the `== 3 ? 1 : 0` terms below).
 * table  : DP score table, updated in place.
 */
void kernel_nussinov(int n, base seq[ n + 0], int table[ n + 0][n + 0])
{
 int i, j, k;

/* Copyright (C) 1991-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it.  */
/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default.  */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
   Unicode 6.0.  */
/* We do not support C11 <threads.h>.  */
 /* Scalars used by the generated code: t* are tile/point loop
    counters; lb/ub variants are loop-bound temporaries. */
 int t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
 int lb, ub, lbp, ubp, lb2, ub2;
 register int lbv, ubv;
/* Start of CLooG code */
if (n >= 2) {
  /* t2: wavefront over anti-diagonals of tiles; tiles on the same
     wavefront are independent, so the t4 loop below is parallel. */
  for (t2=max(-1,ceild(-n-29,32));t2<=floord(n-1,32);t2++) {
    lbp=max(0,t2);
    ubp=min(floord(n-1,32),floord(32*t2+n+29,32));
    /* distribute independent tiles of this wavefront across threads */
#pragma omp parallel for private(lbv,ubv,t5,t6,t7,t8,t9,t10)
    for (t4=lbp;t4<=ubp;t4++) {
      /* peeled corner case: cell (n-2, n-1) */
      if ((t2 <= floord(32*t4-n+2,32)) && (t4 >= ceild(n-31,32))) {
        table[(n-2)][(n-1)] = ((table[(n-2)][(n-1)] >= table[(n-2)][(n-1)-1]) ? table[(n-2)][(n-1)] : table[(n-2)][(n-1)-1]);;
        table[(n-2)][(n-1)] = ((table[(n-2)][(n-1)] >= table[(n-2)+1][(n-1)]) ? table[(n-2)][(n-1)] : table[(n-2)+1][(n-1)]);;
        table[(n-2)][(n-1)] = ((table[(n-2)][(n-1)] >= table[(n-2)+1][(n-1)-1]) ? table[(n-2)][(n-1)] : table[(n-2)+1][(n-1)-1]);;
      }
      /* peeled diagonal seed cells (i, i+1) at tile boundaries */
      if ((t2 == -1) && (t4 <= floord(n-32,32))) {
        table[(32*t4+30)][(32*t4+31)] = ((table[(32*t4+30)][(32*t4+31)] >= table[(32*t4+30)][(32*t4+31)-1]) ? table[(32*t4+30)][(32*t4+31)] : table[(32*t4+30)][(32*t4+31)-1]);;
        table[(32*t4+30)][(32*t4+31)] = ((table[(32*t4+30)][(32*t4+31)] >= table[(32*t4+30)+1][(32*t4+31)]) ? table[(32*t4+30)][(32*t4+31)] : table[(32*t4+30)+1][(32*t4+31)]);;
        table[(32*t4+30)][(32*t4+31)] = ((table[(32*t4+30)][(32*t4+31)] >= table[(32*t4+30)+1][(32*t4+31)-1]) ? table[(32*t4+30)][(32*t4+31)] : table[(32*t4+30)+1][(32*t4+31)-1]);;
      }
      /* main tile body, rows i = -t5 that include their seed cell */
      for (t5=max(max(-n+3,32*t2-32*t4),-32*t4-29);t5<=min(min(0,-32*t4+1),32*t2-32*t4+31);t5++) {
        table[-t5][(-t5+1)] = ((table[-t5][(-t5+1)] >= table[-t5][(-t5+1)-1]) ? table[-t5][(-t5+1)] : table[-t5][(-t5+1)-1]);;
        table[-t5][(-t5+1)] = ((table[-t5][(-t5+1)] >= table[-t5+1][(-t5+1)]) ? table[-t5][(-t5+1)] : table[-t5+1][(-t5+1)]);;
        table[-t5][(-t5+1)] = ((table[-t5][(-t5+1)] >= table[-t5+1][(-t5+1)-1]) ? table[-t5][(-t5+1)] : table[-t5+1][(-t5+1)-1]);;
        for (t7=-t5+2;t7<=min(n-1,32*t4+31);t7++) {
          table[-t5][t7] = ((table[-t5][t7] >= table[-t5][t7-1]) ? table[-t5][t7] : table[-t5][t7-1]);;
          table[-t5][t7] = ((table[-t5][t7] >= table[-t5+1][t7]) ? table[-t5][t7] : table[-t5+1][t7]);;
          table[-t5][t7] = ((table[-t5][t7] >= table[-t5+1][t7-1]+(((seq[-t5])+(seq[t7])) == 3 ? 1 : 0)) ? table[-t5][t7] : table[-t5+1][t7-1]+(((seq[-t5])+(seq[t7])) == 3 ? 1 : 0));;
          /* split-point scan: table[i][k] + table[k+1][j] */
          for (t9=-t5+1;t9<=t7-1;t9++) {
            table[-t5][t7] = ((table[-t5][t7] >= table[-t5][t9] + table[t9+1][t7]) ? table[-t5][t7] : table[-t5][t9] + table[t9+1][t7]);;
          }
        }
      }
      /* remaining rows whose seed cell lies in an earlier tile */
      for (t5=max(32*t2-32*t4,-32*t4+2);t5<=min(0,32*t2-32*t4+31);t5++) {
        for (t7=32*t4;t7<=min(n-1,32*t4+31);t7++) {
          table[-t5][t7] = ((table[-t5][t7] >= table[-t5][t7-1]) ? table[-t5][t7] : table[-t5][t7-1]);;
          table[-t5][t7] = ((table[-t5][t7] >= table[-t5+1][t7]) ? table[-t5][t7] : table[-t5+1][t7]);;
          table[-t5][t7] = ((table[-t5][t7] >= table[-t5+1][t7-1]+(((seq[-t5])+(seq[t7])) == 3 ? 1 : 0)) ? table[-t5][t7] : table[-t5+1][t7-1]+(((seq[-t5])+(seq[t7])) == 3 ? 1 : 0));;
          for (t9=-t5+1;t9<=t7-1;t9++) {
            table[-t5][t7] = ((table[-t5][t7] >= table[-t5][t9] + table[t9+1][t7]) ? table[-t5][t7] : table[-t5][t9] + table[t9+1][t7]);;
          }
        }
      }
    }
  }
}
/* End of CLooG code */
}
openmp_sample.c
/* * Copyright (C) 2009-2013 Intel Corporation. All Rights Reserved. * * The source code contained or described herein and all * documents related to the source code ("Material") are owned by * Intel Corporation or its suppliers or licensors. Title to the * Material remains with Intel Corporation or its suppliers and * licensors. The Material is protected by worldwide copyright * laws and treaty provisions. No part of the Material may be * used, copied, reproduced, modified, published, uploaded, * posted, transmitted, distributed, or disclosed in any way * except as expressly provided in the license provided with the * Materials. No license under any patent, copyright, trade * secret or other intellectual property right is granted to or * conferred upon you by disclosure or delivery of the Materials, * either expressly, by implication, inducement, estoppel or * otherwise, except as expressly provided in the license * provided with the Materials. * * [DESCRIPTION] * Each element of the product matrix c[i][j] is * computed from a unique row and * column of the factor matrices, a[i][k] and b[k][j]. * * In the multithreaded implementation, each thread can * concurrently compute some submatrix of the product without * needing OpenMP data or control synchronization. * * The algorithm uses OpenMP* to parallelize the outer-most loop, * using the "i" row index. * * Both the outer-most "i" loop and middle "k" loop are manually * unrolled by 4. The inner-most "j" loop iterates one-by-one * over the columns of the product and factor matrices. * * [COMPILE] * Use the following compiler options to compile both multi- and * single-threaded versions. * * Parallel compilation: * You must set the stacksize to an appropriate size; otherwise, * the application will generate a segmentation fault. * Linux* and OS X*: appropriate ulimit commands are shown for * bash shell. 
* * Windows*: /Qstd=c99 /Qopenmp /F256000000 * * Linux*: ulimit -s unlimited * -std=c99 -openmp * * OS X*: ulimit -s 64000 * -std=c99 -openmp * * Serial compilation: * * Use the same command, but omit the -openmp (Linux and OS X) * or /Qopenmp (Windows) option. * */ #include <stdio.h> #include <time.h> #include <float.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif #ifndef __cplusplus #define bool _Bool #define true 1 #define false 0 #endif // Matrix size constants // Be careful to set your shell's stacksize limit to a high value if you // wish to increase the SIZE. #define SIZE 4800 // Must be a multiple of 8. #define M SIZE/8 #define N SIZE/4 #define P SIZE/2 #define NTIMES 5 // product matrix calculations int main(void) { double a[M][N], b[N][P], c[M][P], walltime; bool nthr_checked=false; time_t start; int i, j, k, l, i1, i2, i3, k1, k2, k3, nthr=1; printf("Using time() for wall clock time\n"); printf("Problem size: c(%d,%d) = a(%d,%d) * b(%d,%d)\n", M, P, M, N, N, P); printf("Calculating product %d time(s)\n", NTIMES); // a is identity matrix for (i=0; i<M; i++) for (j=0; j<N; j++) a[i][j] = 1.0; // each column of b is the sequence 1,2,...,N for (i=0; i<N; i++) for (j=0; j<P; j++) b[i][j] = i+1.; start = time(NULL); for (l=0; l<NTIMES; l++) { #pragma omp parallel private(i,j,k) { #pragma omp single nowait if (!nthr_checked) { #ifdef _OPENMP nthr = omp_get_num_threads(); #endif printf( "\nWe are using %d thread(s)\n", nthr); nthr_checked = true; } // Initialize product matrix #pragma omp for nowait for (i=0; i<M; i++) for (j=0; j<P; j++) c[i][j] = 0.0; // Parallelize by row. The threads don't need to synchronize at // loop end, so "nowait" can be used. 
#pragma omp for nowait for (i=0; i<M; i++) { for (k=0; k<N; k++) { // Each element of the product is just the sum 1+2+...+n for (j=0; j<P; j++) { c[i][j] += a[i][k] * b[k][j]; } } } } // #pragma omp parallel private(i,j,k) } // l=0,...NTIMES-1 walltime = time(NULL) - start; printf("\nFinished calculations.\n"); printf("Matmul kernel wall clock time = %.2f sec\n", walltime); printf("Wall clock time/thread = %.2f sec\n", walltime/nthr); printf("MFlops = %f\n", (double)(NTIMES)*(double)(N*M*2)*(double)(P)/walltime/1.0e6); return 0; }
test.c
#include <stdio.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"

#define TRIALS (1)

#define N (992)

// INIT_LOOP / ZERO_ARRAY come from ../utilities/utilities.h
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})

#define ZERO(X) ZERO_ARRAY(N, X)

// Exercises "#pragma omp distribute parallel for simd" on an offloading
// target across four series: (1) no dist_schedule, (2) explicit
// dist_schedule chunk sizes, (3) data-sharing attributes (private /
// firstprivate), (4) collapse(2) and collapse(3).  Each section zeroes
// the work array, runs the offloaded loop TRIALS times, verifies the
// result on the host, and prints "Succeeded"/"Failed".
int main(void) {
  check_offloading();

  double A[N], B[N], C[N], D[N], E[N];
  int fail = 0;

  INIT();

  // **************************
  // Series 1: no dist_schedule
  // **************************

  //
  // Test: #iterations == #teams
  //
  ZERO(A);
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target
    #pragma omp teams num_teams(512)
    #pragma omp distribute parallel for simd
    for (int i = 0 ; i < 512 ; i++) {
      A[i] += C[i]; // += 1 per position
    }
  }
  for (int i = 0 ; i < 512 ; i++)
    if (A[i] != TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
      fail = 1;
    }
  if(fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: #iterations > #teams
  //
  ZERO(A);
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target
    #pragma omp teams num_teams(256)
    #pragma omp distribute parallel for simd
    for (int i = 0 ; i < 500 ; i++) {
      A[i] += C[i]; // += 1 per position
    }
  }
  for (int i = 0 ; i < 500 ; i++)
    if (A[i] != TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
      fail = 1;
    }
  if(fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: #iterations < #teams
  //
  ZERO(A);
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target
    #pragma omp teams num_teams(256)
    #pragma omp distribute parallel for simd
    for (int i = 0 ; i < 123 ; i++) {
      A[i] += C[i]; // += 1 per position
    }
  }
  for (int i = 0 ; i < 123 ; i++)
    if (A[i] != TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
      fail = 1;
    }
  if(fail) printf("Failed\n");
  else printf("Succeeded\n");

  // ****************************
  // Series 2: with dist_schedule
  // ****************************

  //
  // Test: #iterations == #teams, dist_schedule(1)
  //
  ZERO(A);
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target
    #pragma omp teams num_teams(512)
    #pragma omp distribute parallel for simd dist_schedule(static,1)
    for (int i = 0 ; i < 512 ; i++) {
      A[i] += C[i]; // += 1 per position
    }
  }
  for (int i = 0 ; i < 512 ; i++)
    if (A[i] != TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
      fail = 1;
    }
  if(fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: #iterations == #teams, dist_schedule(#iterations)
  //
  ZERO(A);
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target
    #pragma omp teams num_teams(512)
    #pragma omp distribute parallel for simd dist_schedule(static,512)
    for (int i = 0 ; i < 512 ; i++) {
      A[i] += C[i]; // += 1 per position
    }
  }
  for (int i = 0 ; i < 512 ; i++)
    if (A[i] != TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
      fail = 1;
    }
  if(fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: #iterations == #teams, dist_schedule(#iterations/10), variable chunk size
  //
  ZERO(A);
  int ten = 10;
  int chunkSize = 512/ten;
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target
    #pragma omp teams num_teams(512)
    #pragma omp distribute parallel for simd dist_schedule(static,chunkSize)
    for (int i = 0 ; i < 512 ; i++) {
      A[i] += C[i]; // += 1 per position
    }
  }
  for (int i = 0 ; i < 512 ; i++)
    if (A[i] != TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
      fail = 1;
    }
  if(fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: #iterations > #teams, dist_schedule(1)
  //
  ZERO(A);
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target
    #pragma omp teams num_teams(256)
    #pragma omp distribute parallel for simd dist_schedule(static,1)
    for (int i = 0 ; i < 500 ; i++) {
      A[i] += C[i]; // += 1 per position
    }
  }
  for (int i = 0 ; i < 500 ; i++)
    if (A[i] != TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
      fail = 1;
    }
  if(fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: #iterations > #teams, dist_schedule(#iterations)
  //
  ZERO(A);
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target
    #pragma omp teams num_teams(256)
    #pragma omp distribute parallel for simd dist_schedule(static,500)
    for (int i = 0 ; i < 500 ; i++) {
      A[i] += C[i]; // += 1 per position
    }
  }
  for (int i = 0 ; i < 500 ; i++)
    if (A[i] != TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
      fail = 1;
    }
  if(fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: #iterations > #teams, dist_schedule(#iterations/10), variable chunk size
  //
  ZERO(A);
  ten = 10;
  chunkSize = 500/ten;
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target
    #pragma omp teams num_teams(256)
    #pragma omp distribute parallel for simd dist_schedule(static,chunkSize)
    for (int i = 0 ; i < 500 ; i++) {
      A[i] += C[i]; // += 1 per position
    }
  }
  for (int i = 0 ; i < 500 ; i++)
    if (A[i] != TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
      fail = 1;
    }
  if(fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: #iterations < #teams, dist_schedule(1)
  //
  ZERO(A);
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target
    #pragma omp teams num_teams(256)
    #pragma omp distribute parallel for simd dist_schedule(static,1)
    for (int i = 0 ; i < 123 ; i++) {
      A[i] += C[i]; // += 1 per position
    }
  }
  for (int i = 0 ; i < 123 ; i++)
    if (A[i] != TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
      fail = 1;
    }
  if(fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: #iterations < #teams, dist_schedule(#iterations)
  //
  ZERO(A);
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target
    #pragma omp teams num_teams(256)
    #pragma omp distribute parallel for simd dist_schedule(static,123)
    for (int i = 0 ; i < 123 ; i++) {
      A[i] += C[i]; // += 1 per position
    }
  }
  for (int i = 0 ; i < 123 ; i++)
    if (A[i] != TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
      fail = 1;
    }
  if(fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: #iterations < #teams, dist_schedule(#iterations)
  //
  ZERO(A);
  ten = 10;
  chunkSize = 123/ten;
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target
    #pragma omp teams num_teams(256)
    #pragma omp distribute parallel for simd dist_schedule(static,chunkSize)
    for (int i = 0 ; i < 123 ; i++) {
      A[i] += C[i]; // += 1 per position
    }
  }
  for (int i = 0 ; i < 123 ; i++)
    if (A[i] != TRIALS) {
      printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
      fail = 1;
    }
  if(fail) printf("Failed\n");
  else printf("Succeeded\n");

  // ****************************
  // Series 3: with ds attributes
  // ****************************

  //
  // Test: private
  //
  ZERO(A); ZERO(B);
  double p = 2.0, q = 4.0;
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target
    #pragma omp teams num_teams(256)
    {
      #pragma omp distribute parallel for simd private(p,q)
      for(int i = 0 ; i < N ; i++) {
        p = 2;
        q = 3;
        A[i] += p;
        B[i] += q;
      }
    }
  }
  for(int i = 0 ; i < N ; i++) {
    if (A[i] != TRIALS*2) {
      printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) TRIALS*2, A[i]);
      fail = 1;
    }
    if (B[i] != TRIALS*3) {
      printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) TRIALS*3, B[i]);
      fail = 1;
    }
  }
  if(fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: firstprivate
  //
  ZERO(A); ZERO(B);
  p = 2.0, q = 4.0;
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target // implicit firstprivate for p and q, their initial values being 2 and 4 for each target invocation
    #pragma omp teams num_teams(64)
    {
      #pragma omp distribute simd firstprivate(p,q)
      for(int i = 0 ; i < 128 ; i++) { // 2 iterations for each team
        p += 3.0; // p and q are firstprivate to the team, and as such incremented twice (2 iterations per team)
        q += 7.0;
        A[i] += p;
        B[i] += q;
      }
    }
  }
  for(int i = 0 ; i < 128 ; i++) {
    if (i % 2 == 0) {
      if (A[i] != (2.0+3.0)*TRIALS) {
        printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]);
        fail = 1;
      }
      if (B[i] != (4.0+7.0)*TRIALS) {
        printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0)*TRIALS, B[i]);
        fail = 1;
      }
    } else {
      if (A[i] != (2.0+3.0*2)*TRIALS) {
        printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0*2)*TRIALS, A[i]);
        fail = 1;
      }
      if (B[i] != (4.0+7.0*2)*TRIALS) {
        printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0*2)*TRIALS, B[i]);
        fail = 1;
      }
    }
  }
  if(fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: lastprivate
  //
  /*
  int lastpriv = -1;
  #pragma omp target map(tofrom:lastpriv)
  #pragma omp teams num_teams(10)
  #pragma omp distribute parallel for simd lastprivate(lastpriv)
  for(int i = 0 ; i < omp_get_num_teams() ; i++)
    lastpriv = i;
  if(lastpriv != 9) {
    printf("lastpriv value is %d and should have been %d\n", lastpriv, 9);
    fail = 1;
  }
  if(fail) printf("Failed\n");
  else printf("Succeeded\n");
  */
  // NOTE(review): the lastprivate test above is disabled, yet a
  // "Succeeded" is still printed unconditionally here, so downstream
  // log-matching sees the same number of result lines.
  printf("Succeeded\n");

  // **************************
  // Series 4: collapse
  // **************************

  //
  // Test: 2 loops
  //
  // NOTE(review): S/T/U (and V/Z below) are malloc'd and never freed;
  // harmless in a test that exits immediately, but worth knowing.
  double * S = (double *) malloc(N*N*sizeof(double));
  double * T = (double *) malloc(N*N*sizeof(double));
  double * U = (double *) malloc(N*N*sizeof(double));
  for (int i = 0 ; i < N ; i++)
    for (int j = 0 ; j < N ; j++) {
      S[i*N+j] = 0.0;
      T[i*N+j] = 1.0;
      U[i*N+j] = 2.0;
    }
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target map(tofrom:S[:N*N]), map(to:T[:N*N],U[:N*N])
    #pragma omp teams num_teams(512)
    #pragma omp distribute parallel for simd collapse(2)
    for (int i = 0 ; i < N ; i++)
      for (int j = 0 ; j < N ; j++)
        S[i*N+j] += T[i*N+j] + U[i*N+j]; // += 3 at each t
  }
  for (int i = 0 ; i < N ; i++)
    for (int j = 0 ; j < N ; j++)
      if (S[i*N+j] != TRIALS*3.0) {
        printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, S[i*N+j]);
        fail = 1;
      }
  if(fail) printf("Failed\n");
  else printf("Succeeded\n");

  //
  // Test: 3 loops
  //
  int M = N/8;
  double * V = (double *) malloc(M*M*M*sizeof(double));
  double * Z = (double *) malloc(M*M*M*sizeof(double));
  for (int i = 0 ; i < M ; i++)
    for (int j = 0 ; j < M ; j++)
      for (int k = 0 ; k < M ; k++) {
        V[i*M*M+j*M+k] = 2.0;
        Z[i*M*M+j*M+k] = 3.0;
      }
  for (int t = 0 ; t < TRIALS ; t++) {
    #pragma omp target map(tofrom:V[:M*M*M]), map(to:Z[:M*M*M])
    #pragma omp teams num_teams(512)
    #pragma omp distribute parallel for simd collapse(3)
    for (int i = 0 ; i < M ; i++)
      for (int j = 0 ; j < M ; j++)
        for (int k = 0 ; k < M ; k++)
          V[i*M*M+j*M+k] += Z[i*M*M+j*M+k]; // += 3 at each t
  }
  for (int i = 0 ; i < M ; i++)
    for (int j = 0 ; j < M ; j++)
      for (int k = 0 ; k < M ; k++)
        if (V[i*M*M+j*M+k] != 2.0+TRIALS*3.0) {
          // NOTE(review): this message omits the k index and prints
          // TRIALS*3.0 as the expected value although the comparison
          // uses 2.0+TRIALS*3.0 — misleading diagnostics on failure.
          printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, V[i*M*M+j*M+k]);
          fail = 1;
        }
  if(fail) printf("Failed\n");
  else printf("Succeeded\n");

  return 0;
}
GB_binop__div_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):          GB (_AaddB__div_uint16)
// A.*B function (eWiseMult):        GB (_AemultB_01__div_uint16)
// A.*B function (eWiseMult):        GB (_AemultB_02__div_uint16)
// A.*B function (eWiseMult):        GB (_AemultB_03__div_uint16)
// A.*B function (eWiseMult):        GB (_AemultB_bitmap__div_uint16)
// A*D function (colscale):          GB (_AxD__div_uint16)
// D*A function (rowscale):          GB (_DxB__div_uint16)
// C+=B function (dense accum):      GB (_Cdense_accumB__div_uint16)
// C+=b function (dense accum):      GB (_Cdense_accumb__div_uint16)
// C+=A+B function (dense ewise3):   GB (_Cdense_ewise3_accum__div_uint16)
// C=A+B function (dense ewise3):    GB (_Cdense_ewise3_noaccum__div_uint16)
// C=scalar+B                        GB (_bind1st__div_uint16)
// C=scalar+B'                       GB (_bind1st_tran__div_uint16)
// C=A+scalar                        GB (_bind2nd__div_uint16)
// C=A'+scalar                       GB (_bind2nd_tran__div_uint16)

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 16)

// These macros parameterize the shared *_template.c files included below;
// each template is expanded with uint16_t operands and the unsigned
// integer-division operator.

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_IDIV_UNSIGNED (x, y, 16) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_DIV || GxB_NO_UINT16 || GxB_NO_DIV_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__div_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): this second return is unreachable — the block above
    // already returns GrB_SUCCESS (generator artifact; harmless).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__div_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__div_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__div_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__div_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IDIV_UNSIGNED (x, bij, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__div_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IDIV_UNSIGNED (aij, y, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                               \
{                                                       \
    uint16_t aij = GBX (Ax, pA, false) ;                \
    Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 16) ;           \
}

// (continues past this chunk — body not visible here)
GrB_Info GB (_bind1st_tran__div_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 16) ; \ } GrB_Info GB (_bind2nd_tran__div_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
gemm_x_csr_row.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"

/* Dense-times-sparse row kernel: y := beta * y + alpha * A * x
 *
 * A is an m-by-k sparse matrix in CSR format (`mat`), x is a dense
 * k-by-`columns` matrix with leading dimension `ldx`, and y is a dense
 * m-by-`columns` matrix with leading dimension `ldy`; both dense operands
 * are addressed row-major through index2(). Rows of y are independent, so
 * the outer loop is parallelized over rows when OpenMP is enabled.
 *
 * Returns ALPHA_SPARSE_STATUS_SUCCESS unconditionally.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    const ALPHA_INT row_count = mat->rows;
    const ALPHA_INT col_count = columns;
    ALPHA_INT num_threads = alpha_get_thread_num();

#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT row = 0; row < row_count; ++row)
    {
        ALPHA_Number *y_row = &y[index2(row, 0, ldy)];

        /* First scale the output row: y_row *= beta. */
        for (ALPHA_INT col = 0; col < col_count; ++col)
            alpha_mule(y_row[col], beta);

        /* Then accumulate alpha * A(row,:) * x into y_row. */
        const ALPHA_INT nz_end = mat->rows_end[row];
        for (ALPHA_INT nz = mat->rows_start[row]; nz < nz_end; ++nz)
        {
            ALPHA_Number scaled;
            alpha_mul(scaled, alpha, mat->values[nz]);

            /* Dense row of x selected by the column index of this nonzero. */
            const ALPHA_Number *x_row = &x[index2(mat->col_indx[nz], 0, ldx)];
            for (ALPHA_INT col = 0; col < col_count; ++col)
                alpha_madde(y_row[col], scaled, x_row[col]);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
dynamic_smagorinsky_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela // // System includes #include <vector> #include <map> // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" #include "includes/element.h" #include "utilities/openmp_utils.h" #include "utilities/geometry_utilities.h" #include "includes/cfd_variables.h" #include "fluid_dynamics_application_variables.h" #include "includes/global_pointer_variables.h" #ifndef KRATOS_DYNAMIC_SMAGORINSKY_UTILITIES_H_INCLUDED #define KRATOS_DYNAMIC_SMAGORINSKY_UTILITIES_H_INCLUDED namespace Kratos { ///@addtogroup FluidDynamicsApplication ///@{ ///@name Kratos Classes ///@{ /// Helper class to dynamically determine a value for the Smagorinsly parameter. /** This class uses the Variational Germano Identity to determine a value for the Smagorinsky parameter. This value is stored in the elemental variable C_SMAGORINSKY, the element implementation is responsible for using it. The ability to assign different values to different patches of elements (identified by the PATCH_INDEX variable) is supported, although it tends to produce unreliable results due to a 0/0 indetermination in patches with smooth velocity fields. This class is based in Oberai, A.A. and Wanderer, J., Variational formulation of the Germano identity for the Navier Stokes equations, Journal of Turbulence, 2005, vol 6. Note that the formulation described there requires a nested mesh. It takes the model part containing a coarse mesh as input and assumes that all elements will be subdivided before CalculateC() is called. Remember to call StoreCoarseMesh before refining the element, otherwise the coarse mesh will be lost. @see VMS for an element implementation that uses the Smagorinsky model. 
@see Local_Refine_Triangle_Mesh,Local_Refine_Tetrahedra_Mesh for the element refinement process. */ class DynamicSmagorinskyUtils { public: ///@name Life Cycle ///@{ /// Constructor /** @param rModelPart Reference to the model part containing the coarse mesh @param DomainSize Spatial dimension (2 or 3) */ DynamicSmagorinskyUtils(ModelPart& rModelPart, unsigned int DomainSize): mrModelPart(rModelPart), mDomainSize(DomainSize), mCoarseMesh(), mPatchIndices() {} /// Destructor ~DynamicSmagorinskyUtils() {} ///@} ///@name Operations ///@{ /// Store current mesh as coarse mesh. Call before refining. /** If you are refining more than once, this only has to be called before last refinement. */ void StoreCoarseMesh() { // Clear existing mesh (if any) mCoarseMesh.clear(); // Store current mesh for( ModelPart::ElementsContainerType::ptr_iterator itpElem = mrModelPart.Elements().ptr_begin(); itpElem != mrModelPart.Elements().ptr_end(); ++itpElem) { // (*itpElem)->GetValue(C_SMAGORINSKY) = 0.0; // Set the Smagorinsky parameter to zero for the coarse mesh (do this once to reset any input values) mCoarseMesh.push_back(*itpElem); } // Count the number of patches in the model (in parallel) const int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector ElementPartition; OpenMPUtils::DivideInPartitions(mCoarseMesh.size(),NumThreads,ElementPartition); std::vector< std::vector<int> > LocalIndices(NumThreads); #pragma omp parallel { int k = OpenMPUtils::ThisThread(); ModelPart::ElementsContainerType::iterator ElemBegin = mCoarseMesh.begin() + ElementPartition[k]; ModelPart::ElementsContainerType::iterator ElemEnd = mCoarseMesh.begin() + ElementPartition[k+1]; for( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { this->AddNewIndex(LocalIndices[k],itElem->GetValue(PATCH_INDEX)); } } // Combine the partial lists and create a map for PATCH_INDEX -> Vector position unsigned int Counter = 0; std::pair<int, unsigned int> NewVal; std::pair< 
std::map<int, unsigned int>::iterator, bool > Result; for( std::vector< std::vector<int> >::iterator itList = LocalIndices.begin(); itList != LocalIndices.end(); ++itList ) { for( std::vector<int>::iterator itIndex = itList->begin(); itIndex != itList->end(); ++itIndex) { // Note that instering in map already sorts and checks for uniqueness NewVal.first = *itIndex; NewVal.second = Counter; Result = mPatchIndices.insert(NewVal); if (Result.second) ++Counter; } } } /// Provide a value for the Smagorinsky coefficient using the Variational Germano Identity void CalculateC() { // Update the velocity values for the terms that belong to the coarse mesh this->SetCoarseVel(); // Partitioning const int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector CoarseElementPartition,FineElementPartition; OpenMPUtils::DivideInPartitions(mCoarseMesh.size(),NumThreads,CoarseElementPartition); OpenMPUtils::DivideInPartitions(mrModelPart.Elements().size(),NumThreads,FineElementPartition); // Initialize temporary containers unsigned int PatchNumber = mPatchIndices.size(); std::vector< std::vector<double> > GlobalPatchNum(NumThreads); // Numerator on each patch std::vector< std::vector<double> > GlobalPatchDen(NumThreads); // Denominator on each patch const double EnergyTol = 0.005; double TotalDissipation = 0; #pragma omp parallel reduction(+:TotalDissipation) { int k = OpenMPUtils::ThisThread(); // Initialize the iterator boundaries for this thread ModelPart::ElementsContainerType::iterator CoarseElemBegin = mCoarseMesh.begin() + CoarseElementPartition[k]; ModelPart::ElementsContainerType::iterator CoarseElemEnd = mCoarseMesh.begin() + CoarseElementPartition[k+1]; ModelPart::ElementsContainerType::iterator FineElemBegin = mrModelPart.ElementsBegin() + FineElementPartition[k]; ModelPart::ElementsContainerType::iterator FineElemEnd = mrModelPart.ElementsBegin() + FineElementPartition[k+1]; // Initialize some thread-local variables Vector LocalValues, LocalCoarseVel; 
Matrix LocalMassMatrix; ProcessInfo& rProcessInfo = mrModelPart.GetProcessInfo(); double Residual,Model; unsigned int PatchPosition; // Thread-local containers for the values in each patch std::vector<double>& rPatchNum = GlobalPatchNum[k]; std::vector<double>& rPatchDen = GlobalPatchDen[k]; rPatchNum.resize(PatchNumber,0.0);// Fill with zeros rPatchDen.resize(PatchNumber,0.0); if (mDomainSize == 2) { LocalValues.resize(9); LocalCoarseVel.resize(9); LocalMassMatrix.resize(9,9,false); array_1d<double,3> N; BoundedMatrix<double,3,2> DN_DX; BoundedMatrix<double,2,2> dv_dx; // Evaluate the N-S and model terms in each coarse element for( ModelPart::ElementsContainerType::iterator itElem = CoarseElemBegin; itElem != CoarseElemEnd; ++itElem) { PatchPosition = mPatchIndices[ itElem->GetValue(PATCH_INDEX) ]; this->GermanoTerms2D(*itElem,N,DN_DX,dv_dx,LocalValues,LocalCoarseVel,LocalMassMatrix,rProcessInfo,Residual,Model); rPatchNum[PatchPosition] += Residual; rPatchDen[PatchPosition] += Model; TotalDissipation += Residual; } // Now evaluate the corresponding terms in the fine mesh for( ModelPart::ElementsContainerType::iterator itElem = FineElemBegin; itElem != FineElemEnd; ++itElem) { // Deactivate Smagorinsky to compute the residual of galerkin+stabilization terms only itElem->GetValue(C_SMAGORINSKY) = 0.0; PatchPosition = mPatchIndices[ itElem->GetValue(PATCH_INDEX) ]; this->GermanoTerms2D(*itElem,N,DN_DX,dv_dx,LocalValues,LocalCoarseVel,LocalMassMatrix,rProcessInfo,Residual,Model); rPatchNum[PatchPosition] -= Residual; rPatchDen[PatchPosition] -= Model; } } else // mDomainSize == 3 { LocalValues.resize(16); LocalCoarseVel.resize(16); LocalMassMatrix.resize(16,16,false); array_1d<double,4> N; BoundedMatrix<double,4,3> DN_DX; BoundedMatrix<double,3,3> dv_dx; // Evaluate the N-S and model terms in each coarse element for( ModelPart::ElementsContainerType::iterator itElem = CoarseElemBegin; itElem != CoarseElemEnd; ++itElem) { PatchPosition = mPatchIndices[ 
itElem->GetValue(PATCH_INDEX) ]; this->GermanoTerms3D(*itElem,N,DN_DX,dv_dx,LocalValues,LocalCoarseVel,LocalMassMatrix,rProcessInfo,Residual,Model); rPatchNum[PatchPosition] += Residual; rPatchDen[PatchPosition] += Model; TotalDissipation += Residual; } // Now evaluate the corresponding terms in the fine mesh for( ModelPart::ElementsContainerType::iterator itElem = FineElemBegin; itElem != FineElemEnd; ++itElem) { // Deactivate Smagorinsky to compute the residual of galerkin+stabilization terms only itElem->GetValue(C_SMAGORINSKY) = 0.0; PatchPosition = mPatchIndices[ itElem->GetValue(PATCH_INDEX) ]; this->GermanoTerms3D(*itElem,N,DN_DX,dv_dx,LocalValues,LocalCoarseVel,LocalMassMatrix,rProcessInfo,Residual,Model); rPatchNum[PatchPosition] -= Residual; rPatchDen[PatchPosition] -= Model; } } } // Combine the results of each thread in position 0 for( std::vector< std::vector<double> >::iterator itNum = GlobalPatchNum.begin()+1, itDen = GlobalPatchDen.begin()+1; itNum != GlobalPatchNum.end(); ++itNum, ++itDen) { for( std::vector<double>::iterator TotalNum = GlobalPatchNum[0].begin(), LocalNum = itNum->begin(), TotalDen = GlobalPatchDen[0].begin(), LocalDen = itDen->begin(); TotalNum != GlobalPatchNum[0].end(); ++TotalNum,++LocalNum,++TotalDen,++LocalDen) { *TotalNum += *LocalNum; *TotalDen += *LocalDen; } } // Compute the smagorinsky coefficient for each patch by combining the values from each thread std::vector<double> PatchC(PatchNumber); double NumTol = EnergyTol * fabs(TotalDissipation); for( std::vector<double>::iterator itNum = GlobalPatchNum[0].begin(), itDen = GlobalPatchDen[0].begin(), itC = PatchC.begin(); itC != PatchC.end(); ++itNum, ++itDen, ++itC) { // If the dissipation we are "missing" by not considering Smagorinsky is small, do not use Smagorinsky (this avoids a division by ~0, as the denominator should go to zero too) if ( (fabs(*itNum) < NumTol) )//|| (fabs(*itDen) < 1.0e-12) ) *itC = 0.0; else *itC = sqrt( 0.5 * fabs( *itNum / *itDen ) ); } // 
Finally, assign each element its new smagorinsky value #pragma omp parallel { int k = OpenMPUtils::ThisThread(); ModelPart::ElementsContainerType::iterator ElemBegin = mrModelPart.ElementsBegin() + FineElementPartition[k]; ModelPart::ElementsContainerType::iterator ElemEnd = mrModelPart.ElementsBegin() + FineElementPartition[k+1]; unsigned int PatchPosition; for( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { PatchPosition = mPatchIndices[ itElem->GetValue(PATCH_INDEX) ]; itElem->GetValue(C_SMAGORINSKY) = PatchC[PatchPosition]; } } } /// For the bridge analysis problem, correct the boundary flag after the refinement. /** Remember to run this AFTER EACH REFINEMENT STEP Possible values for the variable: 1.0 inlet, 2.0 bridge surface, 3.0 outlet, 0.0 otherwise @param rThisVariable The Kratos variable used to identify the boundary */ void CorrectFlagValues(Variable<double>& rThisVariable = FLAG_VARIABLE) { // Loop over coarse mesh to evaluate all terms that do not involve the fine mesh const int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector NodePartition; OpenMPUtils::DivideInPartitions(mrModelPart.NumberOfNodes(),NumThreads,NodePartition); #pragma omp parallel { int k = OpenMPUtils::ThisThread(); ModelPart::NodeIterator NodesBegin = mrModelPart.NodesBegin() + NodePartition[k]; ModelPart::NodeIterator NodesEnd = mrModelPart.NodesBegin() + NodePartition[k+1]; double Value0, Value1; for( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { if( itNode->GetValue(FATHER_NODES).size() == 2 ) // If the node is refined { Value0 = itNode->GetValue(FATHER_NODES)[0].FastGetSolutionStepValue(rThisVariable); Value1 = itNode->GetValue(FATHER_NODES)[1].FastGetSolutionStepValue(rThisVariable); if( Value0 != Value1 ) // If this node is problematic { if ( Value0 == 0.0 || Value1 == 0.0 ) { // if either of the parents is not on the boundary, this node is not on the boundary 
itNode->FastGetSolutionStepValue(rThisVariable) = 0.0; } /* All remaining cases are unlikely in well-posed problems, I'm arbitrarily giving priority to the outlet, so that the node is only inlet or bridge surface if both parents are */ else if( Value0 == 3.0 ) { itNode->FastGetSolutionStepValue(rThisVariable) = Value0; } else if( Value1 == 3.0 ) { // The node is only bridge surface if both parents are itNode->FastGetSolutionStepValue(rThisVariable) = Value1; } else // Default behaviour: Parent 0 takes precedence { itNode->FastGetSolutionStepValue(rThisVariable) = Value0; } } } } } } ///@} private: ///@name Member Variables ///@{ /// ModelPart of the fluid problem ModelPart& mrModelPart; /// Spatial dimenstion unsigned int mDomainSize; /// Container for the coarse mesh (the fine mesh is stored by the model part) ModelPart::ElementsContainerType mCoarseMesh; /// A map relating patch indices to positions in the internal storage arrays std::map<int, unsigned int> mPatchIndices; ///@name Private Operations ///@{ /// Calculate the "Coarse Mesh" velocity /** The operations on the coarse mesh are evaluated on the fine mesh, but using an averaged velocity on the nodes that only exist on the fine mesh. Velocity gradients calculated on the fine mesh using this average velocity will be equal to those that would be obtained using the coarse mesh. This function assigns the "coarse" velocity value to all nodes */ void SetCoarseVel() { /* Note: This loop can't be parallelized, as we are relying on the fact that refined nodes are at the end of the list and their parents will be updated before the refined nodes are reached. 
There is an alternative solution (always calculate the coarse mesh velocity from the historic database) which can be parallelized but won't work for multiple levels of refinement */ for( ModelPart::NodeIterator itNode = mrModelPart.NodesBegin(); itNode != mrModelPart.NodesEnd(); ++itNode) { if( itNode->GetValue(FATHER_NODES).size() == 2 ) { Node<3>& rParent1 = itNode->GetValue(FATHER_NODES)[0]; Node<3>& rParent2 = itNode->GetValue(FATHER_NODES)[1]; itNode->GetValue(COARSE_VELOCITY) = 0.5 * ( rParent1.FastGetSolutionStepValue(VELOCITY) + rParent2.FastGetSolutionStepValue(VELOCITY) ); } else { itNode->GetValue(COARSE_VELOCITY) = itNode->FastGetSolutionStepValue(VELOCITY); } } } /// Return the Galerkin (+stabilization) and Model terms for this element (2D version) void GermanoTerms2D(Element& rElem, array_1d<double,3>& rShapeFunc, BoundedMatrix<double,3,2>& rShapeDeriv, BoundedMatrix<double,2,2>& rGradient, Vector& rNodalResidualContainer, Vector& rNodalVelocityContainer, Matrix& rMassMatrix, ProcessInfo& rProcessInfo, double& rResidual, double& rModel) { const double Dim = 2; const double NumNodes = 3; // Initialize double Area; double Density = 0.0; rGradient = ZeroMatrix(Dim,Dim); rResidual = 0.0; rModel = 0.0; // Calculate the residual this->CalculateResidual(rElem,rMassMatrix,rNodalVelocityContainer,rNodalResidualContainer,rProcessInfo); // We use rNodalVelocityContainer as an auxiliaty variable this->GetCoarseVelocity2D(rElem,rNodalVelocityContainer); for( Vector::iterator itRHS = rNodalResidualContainer.begin(), itVel = rNodalVelocityContainer.begin(); itRHS != rNodalResidualContainer.end(); ++itRHS, ++itVel) rResidual += (*itVel) * (*itRHS); // Calculate the model term GeometryUtils::CalculateGeometryData( rElem.GetGeometry(), rShapeDeriv, rShapeFunc, Area); // Compute Grad(u), Density and < Grad(w), Grad(u) > for (unsigned int j = 0; j < NumNodes; ++j) // Columns of <Grad(Ni),Grad(Nj)> { Density += rShapeFunc[j] * 
rElem.GetGeometry()[j].FastGetSolutionStepValue(DENSITY); const array_1d< double,3 >& rNodeVel = rElem.GetGeometry()[j].FastGetSolutionStepValue(VELOCITY); // Nodal velocity for (unsigned int i = 0; i < NumNodes; ++i) // Rows of <Grad(Ni),Grad(Nj)> { const array_1d< double,3 >& rNodeTest = rElem.GetGeometry()[i].GetValue(COARSE_VELOCITY); // Test function (particularized to coarse velocity) for (unsigned int k = 0; k < Dim; ++k) // Space Dimensions rModel += rNodeTest[k] * rShapeDeriv(i,k) * rShapeDeriv(j,k) * rNodeVel[k]; } for (unsigned int m = 0; m < Dim; ++m) // Calculate symmetric gradient { for (unsigned int n = 0; n < m; ++n) // Off-diagonal rGradient(m,n) += 0.5 * (rShapeDeriv(j,n) * rNodeVel[m] + rShapeDeriv(j,m) * rNodeVel[n]); // Symmetric gradient, only lower half is written rGradient(m,m) += rShapeDeriv(j,m) * rNodeVel[m]; // Diagonal } } rModel *= Area; // To this point, rModel contains the integral over the element of Grad(U_coarse):Grad(U) // Norm[ Grad(u) ] double SqNorm = 0.0; for (unsigned int i = 0; i < Dim; ++i) { for (unsigned int j = 0; j < i; ++j) SqNorm += 2.0 * rGradient(i,j) * rGradient(i,j); // Adding off-diagonal terms (twice, as matrix is symmetric) SqNorm += rGradient(i,i) * rGradient(i,i); // Diagonal terms } // "Fixed" part of Smagorinsky viscosity: Density * FilterWidth^2 * Norm(SymmetricGrad(U)). 
2*C^2 is accounted for in the caller function const double sqH = 2*Area; rModel *= Density * sqH * sqrt(SqNorm); } /// Return the Galerkin (+stabilization) and Model terms for this element (3D version) void GermanoTerms3D(Element& rElem, array_1d<double,4>& rShapeFunc, BoundedMatrix<double,4,3>& rShapeDeriv, BoundedMatrix<double,3,3>& rGradient, Vector& rNodalResidualContainer, Vector& rNodalVelocityContainer, Matrix& rMassMatrix, ProcessInfo& rProcessInfo, double& rResidual, double& rModel) { const double Dim = 3; const double NumNodes = 4; // Initialize double Volume; double Density = 0.0; rGradient = ZeroMatrix(Dim,Dim); rResidual = 0.0; rModel = 0.0; // Calculate the residual this->CalculateResidual(rElem,rMassMatrix,rNodalVelocityContainer,rNodalResidualContainer,rProcessInfo); // We use rNodalVelocityContainer as an auxiliaty variable this->GetCoarseVelocity3D(rElem,rNodalVelocityContainer); for( Vector::iterator itRHS = rNodalResidualContainer.begin(), itVel = rNodalVelocityContainer.begin(); itRHS != rNodalResidualContainer.end(); ++itRHS, ++itVel) rResidual += (*itVel) * (*itRHS); // Calculate the model term GeometryUtils::CalculateGeometryData( rElem.GetGeometry(), rShapeDeriv, rShapeFunc, Volume); // Compute Grad(u), Density and < Grad(w), Grad(u) > for (unsigned int j = 0; j < NumNodes; ++j) // Columns of <Grad(Ni),Grad(Nj)> { Density += rShapeFunc[j] * rElem.GetGeometry()[j].FastGetSolutionStepValue(DENSITY); const array_1d< double,3 >& rNodeVel = rElem.GetGeometry()[j].FastGetSolutionStepValue(VELOCITY); // Nodal velocity for (unsigned int i = 0; i < NumNodes; ++i) // Rows of <Grad(Ni),Grad(Nj)> { const array_1d< double,3 >& rNodeTest = rElem.GetGeometry()[i].GetValue(COARSE_VELOCITY); // Test function (particularized to coarse velocity) for (unsigned int k = 0; k < Dim; ++k) // Space Dimensions rModel += rNodeTest[k] * rShapeDeriv(i,k) * rShapeDeriv(j,k) * rNodeVel[k]; } for (unsigned int m = 0; m < Dim; ++m) // Calculate symmetric gradient { for 
(unsigned int n = 0; n < m; ++n) // Off-diagonal rGradient(m,n) += 0.5 * (rShapeDeriv(j,n) * rNodeVel[m] + rShapeDeriv(j,m) * rNodeVel[n]); // Symmetric gradient, only lower half is written rGradient(m,m) += rShapeDeriv(j,m) * rNodeVel[m]; // Diagonal } } rModel *= Volume; // To this point, rModel contains the integral over the element of Grad(U_coarse):Grad(U) // Norm[ Symmetric Grad(u) ] = ( 2 * Sij * Sij )^(1/2), we compute the Sij * Sij part in the following loop: double SqNorm = 0.0; for (unsigned int i = 0; i < Dim; ++i) { for (unsigned int j = 0; j < i; ++j) SqNorm += 2.0 * rGradient(i,j) * rGradient(i,j); // Adding off-diagonal terms (twice, as matrix is symmetric) SqNorm += rGradient(i,i) * rGradient(i,i); // Diagonal terms } const double cubeH = 6*Volume; rModel *= Density * pow(cubeH, 2.0/3.0) * sqrt(2.0 * SqNorm); } /// Equivalent to VMS2DSmagorinsky::GetFirstDerivativesVector(), using the velocity evaluated on the coarse mesh void GetCoarseVelocity2D(Element& rElement, Vector& rVar) { unsigned int LocalIndex = 0; const Element::GeometryType& rGeom = rElement.GetGeometry(); for (unsigned int itNode = 0; itNode < 3; ++itNode) { const array_1d< double,3>& rCoarseVel = rGeom[itNode].GetValue(COARSE_VELOCITY); rVar[LocalIndex++] = rCoarseVel[0]; rVar[LocalIndex++] = rCoarseVel[1]; rVar[LocalIndex++] = 0.0; // Pressure Dof } } /// Equivalent to VMS3DSmagorinsky::GetFirstDerivativesVector(), using the velocity evaluated on the coarse mesh void GetCoarseVelocity3D(Element& rElement, Vector& rVar) { unsigned int LocalIndex = 0; const Element::GeometryType& rGeom = rElement.GetGeometry(); for (unsigned int itNode = 0; itNode < 4; ++itNode) { const array_1d< double,3>& rCoarseVel = rGeom[itNode].GetValue(COARSE_VELOCITY); rVar[LocalIndex++] = rCoarseVel[0]; rVar[LocalIndex++] = rCoarseVel[1]; rVar[LocalIndex++] = rCoarseVel[2]; rVar[LocalIndex++] = 0.0; // Pressure Dof } } /// Call the element's member functions to obtain its residual void 
CalculateResidual(Element& rElement, Matrix& rMassMatrix, ///@todo This matrix and the next vector should be transformed to static members once we find a threadsafe way to do so Vector& rAuxVector, Vector& rResidual, ProcessInfo& rCurrentProcessInfo) { rElement.InitializeNonLinearIteration(rCurrentProcessInfo); // Dynamic stabilization terms rElement.CalculateRightHandSide(rResidual,rCurrentProcessInfo); // Dynamic Terms rElement.CalculateMassMatrix(rMassMatrix,rCurrentProcessInfo); rElement.GetSecondDerivativesVector(rAuxVector,0); noalias(rResidual) -= prod(rMassMatrix,rAuxVector); // Velocity Terms rElement.CalculateLocalVelocityContribution(rMassMatrix,rResidual,rCurrentProcessInfo); // Note that once we are here, we no longer need the mass matrix } /// Check if a patch index is known void AddNewIndex( std::vector<int>& rIndices, int ThisIndex ) { bool IsNew = true; for( std::vector<int>::iterator itIndex = rIndices.begin(); itIndex != rIndices.end(); ++itIndex) { if( ThisIndex == *itIndex) { IsNew = false; break; } } if (IsNew) rIndices.push_back(ThisIndex); } ///@} // Private operations }; ///@} Kratos classes ///@} Application group } // namespace Kratos #endif /* KRATOS_DYNAMIC_SMAGORINSKY_UTILITIES_H_INCLUDED */
aux_propagate.h
#ifndef AUX_PROPAGATE_H
#define AUX_PROPAGATE_H

#include <cvpp/containers/matrix.h>
#include <cvpp/algorithms/icp/libicp.h>

#include "class_map.h"
#include "class_group.h"

using namespace cvpp;

/// Propagate objects from Next to Main.
/** Estimates per-group motion by fitting an ICP transform between the
 *  previous-frame and next-frame point groups, applies it to the groups of
 *  `main`, prunes points of `main` already explained by `next`, merges the
 *  new clusters in, and finally refines each group's velocity estimate.
 *
 *  @param prev   groups observed in the previous frame
 *  @param next   groups observed in the current (next) frame
 *  @param main   persistent map updated in place
 *  @param thrctr minimum number of points for a group to be considered
 *  @param thrfit ICP fit threshold forwarded to LibICPd::fit
 *
 *  NOTE(review): Map/Group/Matd/forLOOPi come from cvpp and class_map.h /
 *  class_group.h; semantics below marked "presumably" are inferred and
 *  should be confirmed against those headers.
 */
void propagateObjects( Map& prev , Map& next , Map& main , const int& thrctr , const double& thrfit )
{
    Timer t;
    // Snapshots of each main group's points before (M1p) and after (M1n) motion.
    SeqMatd M1p( main.size() ) , M1n( main.size() );
    // Per-group "observed" flag; stored in a double matrix, written as bool below.
    auto Mobs = MatZEROSd( main.size() );

    /// Phase 1: estimate motion for groups visible in both prev and next.
    t.ptick( "Calculating Movement" );
    #pragma omp parallel for
    forLOOPi( prev.size() )
    {
        // Only fit when both frames have enough points and next did not
        // shrink below 80% of prev (presumably to reject partial occlusion).
        if( prev[i].size() >= thrctr && next[i].size() >= thrctr && 0.8 * prev[i].size() < next[i].size() )
        {
            Matd Mp,Mn;
            // if( next[i].size() < 4 * thrctr )
            // {
            //     Mp = prev[i].getP();
            //     Mn = next[i].getP();
            // }
            // else
            {
                Mp = prev[i].getM();
                Mn = next[i].getM();
            }
            // Fit prev points onto next points; writes R and t of main[i].
            LibICPd icp( Mn );
            icp.fit( Mp , main[i].R , main[i].t , thrfit );
            Mobs(i) = true; // mark group i as observed this frame
        }
    }

    /// Phase 2: apply the estimated rigid motion (R, t) to each main group.
    t.ptick( "Moving Main Groups" );
    forLOOPi( main.size() )
    {
        M1p[i] = main[i].getM();
        M1n[i] = icp_calc( M1p[i] , main[i].R , main[i].t );
        main[i].updM( M1n[i] );
    }

    // Phase 3: remove main points that have a close neighbour in next
    // (they will be re-added from next in phase 4, avoiding duplicates).
    t.ptick( "Removing Main Clusters" );
    forLOOPi( main.size() )
    {
        if( main[i].size() >= thrctr && next[i].size() >= thrctr )
        {
            Matd Mm = main[i].getM();
            Matd Mn = next[i].getM();
            SSeqi idxs; SSeqd dsts;
            // 1-NN search of main points against next points.
            KDtreed tree( Mn );
            tree.knnSearch( Mm , 1 , idxs , dsts );
            Veci rems;
            // Squared-distance test against half of rad^2 (rad is the map radius).
            forLOOPj( idxs.size() ) if( dsts[j][0] < 0.5 * main.rad * main.rad ) rems.insert(j);
            rems.update();
            // Delete from highest index to lowest so indices stay valid.
            rems.mat().SortRows().FlipRows();
            forLOOPj( rems.n() ) main[i].del( rems[j] );
        }
    }

    /// Phase 4: merge next's clusters into main (matching groups first, then
    /// any extra groups next has beyond main's count).
    t.ptick( "Copying Next Clusters" );
    forLOOPij( main.size() , next[i].size() ) main[i].push_back( next[i][j] );
    forLOOPii( main.size() , next.size() ) main.objs.push_back( next[i] );

    // Phase 5: refine velocity v and pose (R, t) per group.
    t.ptick( "Recalculating Main Movement" );
    #pragma omp parallel for
    forLOOPi( M1p.size() )
    {
        // main[i].v = main[i].calcMov();
        if( Mobs(i) )
        {
            // Group was observed: re-fit ICP between old points and the merged set.
            Matd M2 = main[i].getM();
            if( M1p[i].r() >= thrctr && M2.r() >= thrctr )
            {
                LibICPd icp( M2 );
                icp.fit( M1p[i] , main[i].R , main[i].t , thrfit );
                // Mean displacement and its deviation from the running velocity.
                Matd d = ( M2 - M1p[i] ).meanRows();
                double chg = ( main[i].v - d ).rsqsum();
                if( chg > 0.9 )
                {
                    // Displacement disagrees strongly with history: treat as
                    // spurious, freeze the group this frame.
                    main[i].t.setVal(0);
                    main[i].R.setIdentity();
                }
                else
                {
                    // Blend new displacement into the velocity estimate
                    // (first observation replaces, later ones average).
                    if( main[i].v(0) == 0.0 ) main[i].v = d;
                    else main[i].v = ( main[i].v + d ) / 2.0;
                    // Near-zero displacement: clamp velocity to zero.
                    if( d.rsqsum() < 0.1 ) main[i].v.setVal(0);
                    main[i].t = main[i].v;
                    main[i].R.setIdentity();
                }
            }
        }
        else
        {
            // Group not observed: extrapolate from the motion applied in phase 2.
            Matd d = ( M1n[i] - M1p[i] ).meanRows();
            if( main[i].v(0) == 0.0 ) main[i].v = d;
            else main[i].v = ( main[i].v + d ) / 2.0;
            main[i].t = main[i].v;
            main[i].R.setIdentity();
        }
        // main[i].t(2) = 0.0; // No Z motion
        main[i].savePos();
    }
    // Debug output of each group's translation.
    forLOOPi( main.size() ) main[i].t.print();

    // // Update weights
    // t.ptick( "Updating Weights" );
    // forLOOPi( Mobs.r() )
    // {
    //     forLOOPj( main[i].size() ) main[i][j].S -= main[i].C;
    //     if( !Mobs(i) )
    //     {
    //         forLOOPj( main[i].size() ) main[i][j].wgt *= 0.95;
    //     }
    //     else
    //     {
    //         forLOOPj( main[i].size() ) main[i][j].wgt = 1.0;
    //         main[i].C.setVal( 0.0 );
    //     }
    //     forLOOPj( main[i].size() ) main[i][j].S += main[i].C;
    // }

    // // Removing Uncertain Groups
    // t.ptick( "Removing Uncertain Groups" );
    // forLOOPi( main.size() )
    // {
    //     double w = 0;
    //     forLOOPj( main[i].size() ) w += main[i][j].wgt;
    //     w /= main[i].size(); if( w > 0 && w < 0.25 ) main[i] = Group();
    // }

    t.ptick( "Done" );
}

#endif
interpolation_v4.c
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
// Interpolate (prolong) one block of the coarse grid into the corresponding 2x-refined fine-grid block using a
// volume-averaged quartic stencil (5 coarse points per dimension).  Each fine value is computed as
//   write[fine] = prescale_f*write[fine] + P(coarse)
// so prescale_f==0.0 overwrites (used for packing MPI buffers) and prescale_f==1.0 increments.
// The block descriptor supplies either raw pointers (box < 0, e.g. MPI buffers) or box ids that are resolved to
// ghost-zone-offset interior pointers of the level's boxes.
static inline void interpolation_v4_block(level_type *level_f, int id_f, double prescale_f, level_type *level_c, int id_c, blockCopy_type *block){
  // interpolate 3D array from read_i,j,k of read[] to write_i,j,k in write[] using volume averaged quartic prolongation
  int write_dim_i   = block->dim.i<<1; // calculate the dimensions of the resultant fine block
  int write_dim_j   = block->dim.j<<1;
  int write_dim_k   = block->dim.k<<1;

  int  read_i       = block->read.i;
  int  read_j       = block->read.j;
  int  read_k       = block->read.k;
  int  read_jStride = block->read.jStride;
  int  read_kStride = block->read.kStride;

  int write_i       = block->write.i;
  int write_j       = block->write.j;
  int write_k       = block->write.k;
  int write_jStride = block->write.jStride;
  int write_kStride = block->write.kStride;

  const double * __restrict__  read = block->read.ptr;
        double * __restrict__ write = block->write.ptr;
  // box id >= 0 means the pointer must be resolved from the level's box list,
  // offset past the ghost zone in all three dimensions.
  if(block->read.box >=0){
     read_jStride = level_c->my_boxes[block->read.box ].jStride;
     read_kStride = level_c->my_boxes[block->read.box ].kStride;
     read = level_c->my_boxes[ block->read.box].vectors[id_c] + level_c->box_ghosts*(1+ read_jStride+ read_kStride);
  }
  if(block->write.box>=0){
    write_jStride = level_f->my_boxes[block->write.box].jStride;
    write_kStride = level_f->my_boxes[block->write.box].kStride;
    write = level_f->my_boxes[block->write.box].vectors[id_f] + level_f->box_ghosts*(1+write_jStride+write_kStride);
  }

  #ifdef USE_NAIVE_INTERP
  // naive 125pt per fine grid cell
  int i,j,k;
  double c2 = -3.0/128.0;
  double c1 = 22.0/128.0;
  int dj  =   read_jStride;
  int dk  =   read_kStride;
  int dj2 = 2*read_jStride;
  int dk2 = 2*read_kStride;
  // sX1/sX2 flip sign with fine-cell parity: the stencil is mirrored for the
  // second fine cell inside each coarse cell.
  for(k=0;k<write_dim_k;k++){double sk1=c1,sk2=c2;if(k&0x1){sk1=-c1;sk2=-c2;}
  for(j=0;j<write_dim_j;j++){double sj1=c1,sj2=c2;if(j&0x1){sj1=-c1;sj2=-c2;}
  for(i=0;i<write_dim_i;i++){double si1=c1,si2=c2;if(i&0x1){si1=-c1;si2=-c2;}
    int write_ijk = ((i   )+write_i) + (((j   )+write_j)*write_jStride) + (((k   )+write_k)*write_kStride);
    int  read_ijk = ((i>>1)+ read_i) + (((j>>1)+ read_j)* read_jStride) + (((k>>1)+ read_k)* read_kStride);
    //
    // |   -3/128  |  +22/128  |    1.0    |  -22/128  |   +3/128  | coarse grid
    // |-----+-----|-----+-----|-----+-----|-----+-----|-----+-----|
    // |     |     |     |     |?????|     |     |     |     |     | fine grid
    //
    write[write_ijk] = prescale_f*write[write_ijk] +
    + sk2*( + sj2*( si2*read[read_ijk-2-dj2-dk2] + si1*read[read_ijk-1-dj2-dk2] + read[read_ijk-dj2-dk2] - si1*read[read_ijk+1-dj2-dk2] - si2*read[read_ijk+2-dj2-dk2] )
            + sj1*( si2*read[read_ijk-2-dj -dk2] + si1*read[read_ijk-1-dj -dk2] + read[read_ijk-dj -dk2] - si1*read[read_ijk+1-dj -dk2] - si2*read[read_ijk+2-dj -dk2] )
            +     ( si2*read[read_ijk-2    -dk2] + si1*read[read_ijk-1    -dk2] + read[read_ijk    -dk2] - si1*read[read_ijk+1    -dk2] - si2*read[read_ijk+2    -dk2] )
            - sj1*( si2*read[read_ijk-2+dj -dk2] + si1*read[read_ijk-1+dj -dk2] + read[read_ijk+dj -dk2] - si1*read[read_ijk+1+dj -dk2] - si2*read[read_ijk+2+dj -dk2] )
            - sj2*( si2*read[read_ijk-2+dj2-dk2] + si1*read[read_ijk-1+dj2-dk2] + read[read_ijk+dj2-dk2] - si1*read[read_ijk+1+dj2-dk2] - si2*read[read_ijk+2+dj2-dk2] ) )
    + sk1*( + sj2*( si2*read[read_ijk-2-dj2-dk ] + si1*read[read_ijk-1-dj2-dk ] + read[read_ijk-dj2-dk ] - si1*read[read_ijk+1-dj2-dk ] - si2*read[read_ijk+2-dj2-dk ] )
            + sj1*( si2*read[read_ijk-2-dj -dk ] + si1*read[read_ijk-1-dj -dk ] + read[read_ijk-dj -dk ] - si1*read[read_ijk+1-dj -dk ] - si2*read[read_ijk+2-dj -dk ] )
            +     ( si2*read[read_ijk-2    -dk ] + si1*read[read_ijk-1    -dk ] + read[read_ijk    -dk ] - si1*read[read_ijk+1    -dk ] - si2*read[read_ijk+2    -dk ] )
            - sj1*( si2*read[read_ijk-2+dj -dk ] + si1*read[read_ijk-1+dj -dk ] + read[read_ijk+dj -dk ] - si1*read[read_ijk+1+dj -dk ] - si2*read[read_ijk+2+dj -dk ] )
            - sj2*( si2*read[read_ijk-2+dj2-dk ] + si1*read[read_ijk-1+dj2-dk ] + read[read_ijk+dj2-dk ] - si1*read[read_ijk+1+dj2-dk ] - si2*read[read_ijk+2+dj2-dk ] ) )
    +     ( + sj2*( si2*read[read_ijk-2-dj2    ] + si1*read[read_ijk-1-dj2    ] + read[read_ijk-dj2    ] - si1*read[read_ijk+1-dj2    ] - si2*read[read_ijk+2-dj2    ] )
            + sj1*( si2*read[read_ijk-2-dj     ] + si1*read[read_ijk-1-dj     ] + read[read_ijk-dj     ] - si1*read[read_ijk+1-dj     ] - si2*read[read_ijk+2-dj     ] )
            +     ( si2*read[read_ijk-2        ] + si1*read[read_ijk-1        ] + read[read_ijk        ] - si1*read[read_ijk+1        ] - si2*read[read_ijk+2        ] )
            - sj1*( si2*read[read_ijk-2+dj     ] + si1*read[read_ijk-1+dj     ] + read[read_ijk+dj     ] - si1*read[read_ijk+1+dj     ] - si2*read[read_ijk+2+dj     ] )
            - sj2*( si2*read[read_ijk-2+dj2    ] + si1*read[read_ijk-1+dj2    ] + read[read_ijk+dj2    ] - si1*read[read_ijk+1+dj2    ] - si2*read[read_ijk+2+dj2    ] ) )
    - sk1*( + sj2*( si2*read[read_ijk-2-dj2+dk ] + si1*read[read_ijk-1-dj2+dk ] + read[read_ijk-dj2+dk ] - si1*read[read_ijk+1-dj2+dk ] - si2*read[read_ijk+2-dj2+dk ] )
            + sj1*( si2*read[read_ijk-2-dj +dk ] + si1*read[read_ijk-1-dj +dk ] + read[read_ijk-dj +dk ] - si1*read[read_ijk+1-dj +dk ] - si2*read[read_ijk+2-dj +dk ] )
            +     ( si2*read[read_ijk-2    +dk ] + si1*read[read_ijk-1    +dk ] + read[read_ijk    +dk ] - si1*read[read_ijk+1    +dk ] - si2*read[read_ijk+2    +dk ] )
            - sj1*( si2*read[read_ijk-2+dj +dk ] + si1*read[read_ijk-1+dj +dk ] + read[read_ijk+dj +dk ] - si1*read[read_ijk+1+dj +dk ] - si2*read[read_ijk+2+dj +dk ] )
            - sj2*( si2*read[read_ijk-2+dj2+dk ] + si1*read[read_ijk-1+dj2+dk ] + read[read_ijk+dj2+dk ] - si1*read[read_ijk+1+dj2+dk ] - si2*read[read_ijk+2+dj2+dk ] ) )
    - sk2*( + sj2*( si2*read[read_ijk-2-dj2+dk2] + si1*read[read_ijk-1-dj2+dk2] + read[read_ijk-dj2+dk2] - si1*read[read_ijk+1-dj2+dk2] - si2*read[read_ijk+2-dj2+dk2] )
            + sj1*( si2*read[read_ijk-2-dj +dk2] + si1*read[read_ijk-1-dj +dk2] + read[read_ijk-dj +dk2] - si1*read[read_ijk+1-dj +dk2] - si2*read[read_ijk+2-dj +dk2] )
            +     ( si2*read[read_ijk-2    +dk2] + si1*read[read_ijk-1    +dk2] + read[read_ijk    +dk2] - si1*read[read_ijk+1    +dk2] - si2*read[read_ijk+2    +dk2] )
            - sj1*( si2*read[read_ijk-2+dj +dk2] + si1*read[read_ijk-1+dj +dk2] + read[read_ijk+dj +dk2] - si1*read[read_ijk+1+dj +dk2] - si2*read[read_ijk+2+dj +dk2] )
            - sj2*( si2*read[read_ijk-2+dj2+dk2] + si1*read[read_ijk-1+dj2+dk2] + read[read_ijk+dj2+dk2] - si1*read[read_ijk+1+dj2+dk2] - si2*read[read_ijk+2+dj2+dk2] ) );
  }}}
  #else
  // exploit tensor product symmetry and perform 8 fine grid interpolations at a time...
  //  50 x 5pt for i
  //  20 x 5pt for j
  //   8 x 5pt for k
  // ----------------
  //  78 x 5pt for 8 cells (vs 8x125pt = 200x5pt in naive)
  int i,j,k;
  int ii,jj,kk;
  double c2 = -3.0/128.0;
  double c1 = 22.0/128.0;
  int dj  =   read_jStride;
  int dk  =   read_kStride;
  int dj2 = 2*read_jStride;
  int dk2 = 2*read_kStride;
  // (i,j,k) walk the fine block in steps of 2; (ii,jj,kk) are the matching coarse indices.
  for(k=0,kk=0;k<write_dim_k;k+=2,kk++){
  for(j=0,jj=0;j<write_dim_j;j+=2,jj++){
  // compiler cannot infer/speculate write[ijk+write_jStride] is disjoint from write[ijk], so create a unique restrict pointers for each nonliteral offset...
  double * __restrict__ write00 = write + write_i + (write_j+j+0)*write_jStride + (write_k+k+0)*write_kStride;
  double * __restrict__ write10 = write + write_i + (write_j+j+1)*write_jStride + (write_k+k+0)*write_kStride;
  double * __restrict__ write01 = write + write_i + (write_j+j+0)*write_jStride + (write_k+k+1)*write_kStride;
  double * __restrict__ write11 = write + write_i + (write_j+j+1)*write_jStride + (write_k+k+1)*write_kStride;
  for(i=0,ii=0;i<write_dim_i;i+=2,ii++){
    int read_ijk = (ii+ read_i) + (jj+ read_j)* read_jStride + (kk+ read_k)* read_kStride;
    //
    // |   -3/128  |  +22/128  |    1.0    |  -22/128  |   +3/128  | coarse grid
    // |-----+-----|-----+-----|-----+-----|-----+-----|-----+-----|
    // |     |     |     |     |?????|     |     |     |     |     | fine grid
    //
    // grab all coarse grid points...  naming is cXYZ with X = i offset index
    // (0..4 for -2..+2), Y = j offset index, Z = k offset index.
    const double c000=read[read_ijk-2-dj2-dk2], c100=read[read_ijk-1-dj2-dk2], c200=read[read_ijk-dj2-dk2], c300=read[read_ijk+1-dj2-dk2], c400=read[read_ijk+2-dj2-dk2];
    const double c010=read[read_ijk-2-dj -dk2], c110=read[read_ijk-1-dj -dk2], c210=read[read_ijk-dj -dk2], c310=read[read_ijk+1-dj -dk2], c410=read[read_ijk+2-dj -dk2];
    const double c020=read[read_ijk-2    -dk2], c120=read[read_ijk-1    -dk2], c220=read[read_ijk    -dk2], c320=read[read_ijk+1    -dk2], c420=read[read_ijk+2    -dk2];
    const double c030=read[read_ijk-2+dj -dk2], c130=read[read_ijk-1+dj -dk2], c230=read[read_ijk+dj -dk2], c330=read[read_ijk+1+dj -dk2], c430=read[read_ijk+2+dj -dk2];
    const double c040=read[read_ijk-2+dj2-dk2], c140=read[read_ijk-1+dj2-dk2], c240=read[read_ijk+dj2-dk2], c340=read[read_ijk+1+dj2-dk2], c440=read[read_ijk+2+dj2-dk2];

    const double c001=read[read_ijk-2-dj2-dk ], c101=read[read_ijk-1-dj2-dk ], c201=read[read_ijk-dj2-dk ], c301=read[read_ijk+1-dj2-dk ], c401=read[read_ijk+2-dj2-dk ];
    const double c011=read[read_ijk-2-dj -dk ], c111=read[read_ijk-1-dj -dk ], c211=read[read_ijk-dj -dk ], c311=read[read_ijk+1-dj -dk ], c411=read[read_ijk+2-dj -dk ];
    const double c021=read[read_ijk-2    -dk ], c121=read[read_ijk-1    -dk ], c221=read[read_ijk    -dk ], c321=read[read_ijk+1    -dk ], c421=read[read_ijk+2    -dk ];
    const double c031=read[read_ijk-2+dj -dk ], c131=read[read_ijk-1+dj -dk ], c231=read[read_ijk+dj -dk ], c331=read[read_ijk+1+dj -dk ], c431=read[read_ijk+2+dj -dk ];
    const double c041=read[read_ijk-2+dj2-dk ], c141=read[read_ijk-1+dj2-dk ], c241=read[read_ijk+dj2-dk ], c341=read[read_ijk+1+dj2-dk ], c441=read[read_ijk+2+dj2-dk ];

    const double c002=read[read_ijk-2-dj2    ], c102=read[read_ijk-1-dj2    ], c202=read[read_ijk-dj2    ], c302=read[read_ijk+1-dj2    ], c402=read[read_ijk+2-dj2    ];
    const double c012=read[read_ijk-2-dj     ], c112=read[read_ijk-1-dj     ], c212=read[read_ijk-dj     ], c312=read[read_ijk+1-dj     ], c412=read[read_ijk+2-dj     ];
    const double c022=read[read_ijk-2        ], c122=read[read_ijk-1        ], c222=read[read_ijk        ], c322=read[read_ijk+1        ], c422=read[read_ijk+2        ];
    const double c032=read[read_ijk-2+dj     ], c132=read[read_ijk-1+dj     ], c232=read[read_ijk+dj     ], c332=read[read_ijk+1+dj     ], c432=read[read_ijk+2+dj     ];
    const double c042=read[read_ijk-2+dj2    ], c142=read[read_ijk-1+dj2    ], c242=read[read_ijk+dj2    ], c342=read[read_ijk+1+dj2    ], c442=read[read_ijk+2+dj2    ];

    const double c003=read[read_ijk-2-dj2+dk ], c103=read[read_ijk-1-dj2+dk ], c203=read[read_ijk-dj2+dk ], c303=read[read_ijk+1-dj2+dk ], c403=read[read_ijk+2-dj2+dk ];
    const double c013=read[read_ijk-2-dj +dk ], c113=read[read_ijk-1-dj +dk ], c213=read[read_ijk-dj +dk ], c313=read[read_ijk+1-dj +dk ], c413=read[read_ijk+2-dj +dk ];
    const double c023=read[read_ijk-2    +dk ], c123=read[read_ijk-1    +dk ], c223=read[read_ijk    +dk ], c323=read[read_ijk+1    +dk ], c423=read[read_ijk+2    +dk ];
    const double c033=read[read_ijk-2+dj +dk ], c133=read[read_ijk-1+dj +dk ], c233=read[read_ijk+dj +dk ], c333=read[read_ijk+1+dj +dk ], c433=read[read_ijk+2+dj +dk ];
    const double c043=read[read_ijk-2+dj2+dk ], c143=read[read_ijk-1+dj2+dk ], c243=read[read_ijk+dj2+dk ], c343=read[read_ijk+1+dj2+dk ], c443=read[read_ijk+2+dj2+dk ];

    const double c004=read[read_ijk-2-dj2+dk2], c104=read[read_ijk-1-dj2+dk2], c204=read[read_ijk-dj2+dk2], c304=read[read_ijk+1-dj2+dk2], c404=read[read_ijk+2-dj2+dk2];
    const double c014=read[read_ijk-2-dj +dk2], c114=read[read_ijk-1-dj +dk2], c214=read[read_ijk-dj +dk2], c314=read[read_ijk+1-dj +dk2], c414=read[read_ijk+2-dj +dk2];
    const double c024=read[read_ijk-2    +dk2], c124=read[read_ijk-1    +dk2], c224=read[read_ijk    +dk2], c324=read[read_ijk+1    +dk2], c424=read[read_ijk+2    +dk2];
    const double c034=read[read_ijk-2+dj +dk2], c134=read[read_ijk-1+dj +dk2], c234=read[read_ijk+dj +dk2], c334=read[read_ijk+1+dj +dk2], c434=read[read_ijk+2+dj +dk2];
    const double c044=read[read_ijk-2+dj2+dk2], c144=read[read_ijk-1+dj2+dk2], c244=read[read_ijk+dj2+dk2], c344=read[read_ijk+1+dj2+dk2], c444=read[read_ijk+2+dj2+dk2];

    // interpolate in i to create fine i / coarse jk points...
    const double f0c00 = ( c200 + c1*(c100-c300) + c2*(c000-c400) ); // same as original 5pt stencil... f0c00 = ( c2*c000 + c1*c100 + c200 - c1*c300 - c2*c400 )
    const double f1c00 = ( c200 - c1*(c100-c300) - c2*(c000-c400) );
    const double f0c10 = ( c210 + c1*(c110-c310) + c2*(c010-c410) );
    const double f1c10 = ( c210 - c1*(c110-c310) - c2*(c010-c410) );
    const double f0c20 = ( c220 + c1*(c120-c320) + c2*(c020-c420) );
    const double f1c20 = ( c220 - c1*(c120-c320) - c2*(c020-c420) );
    const double f0c30 = ( c230 + c1*(c130-c330) + c2*(c030-c430) );
    const double f1c30 = ( c230 - c1*(c130-c330) - c2*(c030-c430) );
    const double f0c40 = ( c240 + c1*(c140-c340) + c2*(c040-c440) );
    const double f1c40 = ( c240 - c1*(c140-c340) - c2*(c040-c440) );

    const double f0c01 = ( c201 + c1*(c101-c301) + c2*(c001-c401) );
    const double f1c01 = ( c201 - c1*(c101-c301) - c2*(c001-c401) );
    const double f0c11 = ( c211 + c1*(c111-c311) + c2*(c011-c411) );
    const double f1c11 = ( c211 - c1*(c111-c311) - c2*(c011-c411) );
    const double f0c21 = ( c221 + c1*(c121-c321) + c2*(c021-c421) );
    const double f1c21 = ( c221 - c1*(c121-c321) - c2*(c021-c421) );
    const double f0c31 = ( c231 + c1*(c131-c331) + c2*(c031-c431) );
    const double f1c31 = ( c231 - c1*(c131-c331) - c2*(c031-c431) );
    const double f0c41 = ( c241 + c1*(c141-c341) + c2*(c041-c441) );
    const double f1c41 = ( c241 - c1*(c141-c341) - c2*(c041-c441) );

    const double f0c02 = ( c202 + c1*(c102-c302) + c2*(c002-c402) );
    const double f1c02 = ( c202 - c1*(c102-c302) - c2*(c002-c402) );
    const double f0c12 = ( c212 + c1*(c112-c312) + c2*(c012-c412) );
    const double f1c12 = ( c212 - c1*(c112-c312) - c2*(c012-c412) );
    const double f0c22 = ( c222 + c1*(c122-c322) + c2*(c022-c422) );
    const double f1c22 = ( c222 - c1*(c122-c322) - c2*(c022-c422) );
    const double f0c32 = ( c232 + c1*(c132-c332) + c2*(c032-c432) );
    const double f1c32 = ( c232 - c1*(c132-c332) - c2*(c032-c432) );
    const double f0c42 = ( c242 + c1*(c142-c342) + c2*(c042-c442) );
    const double f1c42 = ( c242 - c1*(c142-c342) - c2*(c042-c442) );

    const double f0c03 = ( c203 + c1*(c103-c303) + c2*(c003-c403) );
    const double f1c03 = ( c203 - c1*(c103-c303) - c2*(c003-c403) );
    const double f0c13 = ( c213 + c1*(c113-c313) + c2*(c013-c413) );
    const double f1c13 = ( c213 - c1*(c113-c313) - c2*(c013-c413) );
    const double f0c23 = ( c223 + c1*(c123-c323) + c2*(c023-c423) );
    const double f1c23 = ( c223 - c1*(c123-c323) - c2*(c023-c423) );
    const double f0c33 = ( c233 + c1*(c133-c333) + c2*(c033-c433) );
    const double f1c33 = ( c233 - c1*(c133-c333) - c2*(c033-c433) );
    const double f0c43 = ( c243 + c1*(c143-c343) + c2*(c043-c443) );
    const double f1c43 = ( c243 - c1*(c143-c343) - c2*(c043-c443) );

    const double f0c04 = ( c204 + c1*(c104-c304) + c2*(c004-c404) );
    const double f1c04 = ( c204 - c1*(c104-c304) - c2*(c004-c404) );
    const double f0c14 = ( c214 + c1*(c114-c314) + c2*(c014-c414) );
    const double f1c14 = ( c214 - c1*(c114-c314) - c2*(c014-c414) );
    const double f0c24 = ( c224 + c1*(c124-c324) + c2*(c024-c424) );
    const double f1c24 = ( c224 - c1*(c124-c324) - c2*(c024-c424) );
    const double f0c34 = ( c234 + c1*(c134-c334) + c2*(c034-c434) );
    const double f1c34 = ( c234 - c1*(c134-c334) - c2*(c034-c434) );
    const double f0c44 = ( c244 + c1*(c144-c344) + c2*(c044-c444) );
    const double f1c44 = ( c244 - c1*(c144-c344) - c2*(c044-c444) );

    // interpolate in j to create fine ij / coarse k points...
    const double f00c0 = (f0c20 + c1*(f0c10-f0c30) + c2*(f0c00-f0c40) );
    const double f10c0 = (f1c20 + c1*(f1c10-f1c30) + c2*(f1c00-f1c40) );
    const double f01c0 = (f0c20 - c1*(f0c10-f0c30) - c2*(f0c00-f0c40) );
    const double f11c0 = (f1c20 - c1*(f1c10-f1c30) - c2*(f1c00-f1c40) );
    const double f00c1 = (f0c21 + c1*(f0c11-f0c31) + c2*(f0c01-f0c41) );
    const double f10c1 = (f1c21 + c1*(f1c11-f1c31) + c2*(f1c01-f1c41) );
    const double f01c1 = (f0c21 - c1*(f0c11-f0c31) - c2*(f0c01-f0c41) );
    const double f11c1 = (f1c21 - c1*(f1c11-f1c31) - c2*(f1c01-f1c41) );
    const double f00c2 = (f0c22 + c1*(f0c12-f0c32) + c2*(f0c02-f0c42) );
    const double f10c2 = (f1c22 + c1*(f1c12-f1c32) + c2*(f1c02-f1c42) );
    const double f01c2 = (f0c22 - c1*(f0c12-f0c32) - c2*(f0c02-f0c42) );
    const double f11c2 = (f1c22 - c1*(f1c12-f1c32) - c2*(f1c02-f1c42) );
    const double f00c3 = (f0c23 + c1*(f0c13-f0c33) + c2*(f0c03-f0c43) );
    const double f10c3 = (f1c23 + c1*(f1c13-f1c33) + c2*(f1c03-f1c43) );
    const double f01c3 = (f0c23 - c1*(f0c13-f0c33) - c2*(f0c03-f0c43) );
    const double f11c3 = (f1c23 - c1*(f1c13-f1c33) - c2*(f1c03-f1c43) );
    const double f00c4 = (f0c24 + c1*(f0c14-f0c34) + c2*(f0c04-f0c44) );
    const double f10c4 = (f1c24 + c1*(f1c14-f1c34) + c2*(f1c04-f1c44) );
    const double f01c4 = (f0c24 - c1*(f0c14-f0c34) - c2*(f0c04-f0c44) );
    const double f11c4 = (f1c24 - c1*(f1c14-f1c34) - c2*(f1c04-f1c44) );

    // interpolate in k to create fine ijk points...
    const double f000 = (f00c2 + c1*(f00c1-f00c3) + c2*(f00c0-f00c4) );
    const double f100 = (f10c2 + c1*(f10c1-f10c3) + c2*(f10c0-f10c4) );
    const double f010 = (f01c2 + c1*(f01c1-f01c3) + c2*(f01c0-f01c4) );
    const double f110 = (f11c2 + c1*(f11c1-f11c3) + c2*(f11c0-f11c4) );
    const double f001 = (f00c2 - c1*(f00c1-f00c3) - c2*(f00c0-f00c4) );
    const double f101 = (f10c2 - c1*(f10c1-f10c3) - c2*(f10c0-f10c4) );
    const double f011 = (f01c2 - c1*(f01c1-f01c3) - c2*(f01c0-f01c4) );
    const double f111 = (f11c2 - c1*(f11c1-f11c3) - c2*(f11c0-f11c4) );

    // commit to memory...
    #if 0 // compiler cannot infer/speculate write[ijk+write_jStride] is disjoint from write[ijk], and thus cannot vectorize...
    int write_ijk = ( i+write_i) + ( j+write_j)*write_jStride + ( k+write_k)*write_kStride;
    write[write_ijk                              ] = prescale_f*write[write_ijk                              ] + f000;
    write[write_ijk+1                            ] = prescale_f*write[write_ijk+1                            ] + f100;
    write[write_ijk  +write_jStride              ] = prescale_f*write[write_ijk  +write_jStride              ] + f010;
    write[write_ijk+1+write_jStride              ] = prescale_f*write[write_ijk+1+write_jStride              ] + f110;
    write[write_ijk                +write_kStride] = prescale_f*write[write_ijk                +write_kStride] + f001;
    write[write_ijk+1              +write_kStride] = prescale_f*write[write_ijk+1              +write_kStride] + f101;
    write[write_ijk  +write_jStride+write_kStride] = prescale_f*write[write_ijk  +write_jStride+write_kStride] + f011;
    write[write_ijk+1+write_jStride+write_kStride] = prescale_f*write[write_ijk+1+write_jStride+write_kStride] + f111;
    #else // use a unique restrict pointer for each pencil...
    write00[i  ] = prescale_f*write00[i  ] + f000;
    write00[i+1] = prescale_f*write00[i+1] + f100;
    write10[i  ] = prescale_f*write10[i  ] + f010;
    write10[i+1] = prescale_f*write10[i+1] + f110;
    write01[i  ] = prescale_f*write01[i  ] + f001;
    write01[i+1] = prescale_f*write01[i+1] + f101;
    write11[i  ] = prescale_f*write11[i  ] + f011;
    write11[i+1] = prescale_f*write11[i+1] + f111;
    #endif
  }}}
  #endif
}

//------------------------------------------------------------------------------------------------------------------------------
// perform a (inter-level) volumetric quartic interpolation on vector id_c of the coarse level and increments prescale_f*vector id_f on the fine level by the result
// i.e. id_f = prescale_f*id_f + P*id_c
// prescale_f is nominally 1.0 or 0.0
// quartic interpolation requires a full ghost zone exchange and boundary condition
// This is a rather bulk synchronous implementation which packs all MPI buffers before initiating any sends
// Similarly, it waits for all remote data before copying any into local boxes.
// It does however attempt to overlap local interpolation with MPI
//
// Driver for inter-level quartic interpolation:
//   1. ghost-zone exchange + boundary conditions on the coarse vector (the
//      5-wide stencil reads 2 ghost cells in each direction);
//   2. prepost all Irecv's, pack surface blocks (prescale 0.0 so the MPI
//      buffer is overwritten, not incremented), post all Isend's;
//   3. interpolate purely-local blocks while MPI traffic is in flight;
//   4. Waitall, then unpack received buffers into fine boxes.
// Per-phase wall-clock time is accumulated into level_f->timers.
void interpolation_v4(level_type * level_f, int id_f, double prescale_f, level_type *level_c, int id_c){
  exchange_boundary(level_c,id_c,STENCIL_SHAPE_BOX);
       apply_BCs_v4(level_c,id_c,STENCIL_SHAPE_BOX);

  double _timeCommunicationStart = getTime();
  double _timeStart,_timeEnd;
  int buffer=0;
  int n;
  // tag encodes the fine level's id; low nibble 0x7 distinguishes interpolation traffic
  int my_tag = (level_f->tag<<4) | 0x7;

  #ifdef USE_MPI
  // by convention, level_f allocates a combined array of requests for both level_f recvs and level_c sends...
  int nMessages = level_c->interpolation.num_sends + level_f->interpolation.num_recvs;
  MPI_Request *recv_requests = level_f->interpolation.requests;
  MPI_Request *send_requests = level_f->interpolation.requests + level_f->interpolation.num_recvs;

  // loop through packed list of MPI receives and prepost Irecv's...
  if(level_f->interpolation.num_recvs>0){
    _timeStart = getTime();
    #ifdef USE_MPI_THREAD_MULTIPLE
    #pragma omp parallel for schedule(dynamic,1)
    #endif
    for(n=0;n<level_f->interpolation.num_recvs;n++){
      MPI_Irecv(level_f->interpolation.recv_buffers[n],
                level_f->interpolation.recv_sizes[n],
                MPI_DOUBLE,
                level_f->interpolation.recv_ranks[n],
                my_tag,
                MPI_COMM_WORLD,
                &recv_requests[n]
      );
    }
    _timeEnd = getTime();
    level_f->timers.interpolation_recv += (_timeEnd-_timeStart);
  }

  // pack MPI send buffers...
  if(level_c->interpolation.num_blocks[0]>0){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[0])
    for(buffer=0;buffer<level_c->interpolation.num_blocks[0];buffer++){
      // !!! prescale==0 because you don't want to increment the MPI buffer
      interpolation_v4_block(level_f,id_f,0.0,level_c,id_c,&level_c->interpolation.blocks[0][buffer]);
    }
    _timeEnd = getTime();
    level_f->timers.interpolation_pack += (_timeEnd-_timeStart);
  }

  // loop through MPI send buffers and post Isend's...
  if(level_c->interpolation.num_sends>0){
    _timeStart = getTime();
    #ifdef USE_MPI_THREAD_MULTIPLE
    #pragma omp parallel for schedule(dynamic,1)
    #endif
    for(n=0;n<level_c->interpolation.num_sends;n++){
      MPI_Isend(level_c->interpolation.send_buffers[n],
                level_c->interpolation.send_sizes[n],
                MPI_DOUBLE,
                level_c->interpolation.send_ranks[n],
                my_tag,
                MPI_COMM_WORLD,
                &send_requests[n]
      );
    }
    _timeEnd = getTime();
    level_f->timers.interpolation_send += (_timeEnd-_timeStart);
  }
  #endif

  // perform local interpolation... try and hide within Isend latency...
  if(level_c->interpolation.num_blocks[1]>0){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[1])
    for(buffer=0;buffer<level_c->interpolation.num_blocks[1];buffer++){
      interpolation_v4_block(level_f,id_f,prescale_f,level_c,id_c,&level_c->interpolation.blocks[1][buffer]);
    }
    _timeEnd = getTime();
    level_f->timers.interpolation_local += (_timeEnd-_timeStart);
  }

  // wait for MPI to finish...
  #ifdef USE_MPI
  if(nMessages>0){
    _timeStart = getTime();
    MPI_Waitall(nMessages,level_f->interpolation.requests,level_f->interpolation.status);
    _timeEnd = getTime();
    level_f->timers.interpolation_wait += (_timeEnd-_timeStart);
  }

  // unpack MPI receive buffers
  if(level_f->interpolation.num_blocks[2]>0){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->interpolation.num_blocks[2])
    for(buffer=0;buffer<level_f->interpolation.num_blocks[2];buffer++){
      IncrementBlock(level_f,id_f,prescale_f,&level_f->interpolation.blocks[2][buffer]);
    }
    _timeEnd = getTime();
    level_f->timers.interpolation_unpack += (_timeEnd-_timeStart);
  }
  #endif

  level_f->timers.interpolation_total += (double)(getTime()-_timeCommunicationStart);
}
DRB002-antidep1-var-yes.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. 
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* A loop with loop-carried anti-dependence. Data race pair: a[i+1]@67:10 vs. 
a[i]@67:5 */
#include <stdlib.h>
#include <stdio.h>

/* DataRaceBench kernel (Cetus-transformed): a loop with a loop-carried
   anti-dependence on a[] (loop main#1 is the documented race pair a[i+1]
   vs a[i]).  The Cetus pragmas and the loop bodies are deliberately kept
   verbatim — they are the benchmark's payload. */
int main(int argc, char * argv[])
{
	int i;
	int len = 1000;
	int _ret_val_0;
	if (argc>1)
	{
		len=atoi(argv[1]);
	}
	/* BUG FIX: the VLA was previously declared while len was still the
	   default 1000, *before* argv[1] was parsed.  Running with a length
	   larger than 1000 made every loop below overrun the stack array.
	   Size the array only after len is final. */
	int a[len];
	#pragma cetus private(i)
	#pragma loop name main#0
	#pragma cetus parallel
	#pragma omp parallel for private(i)
	for (i=0; i<len; i ++ )
	{
		a[i]=i;
	}
	#pragma cetus private(i)
	#pragma loop name main#1
	for (i=0; i<(len-1); i ++ )
	{
		a[i]=(a[i+1]+1);
	}
	#pragma cetus private(i)
	#pragma loop name main#2
	for (i=0; i<len; i ++ )
	{
		printf("%d\n", a[i]);
	}
	_ret_val_0=0;
	return _ret_val_0;
}
GB_builder.c
//------------------------------------------------------------------------------
// GB_builder: build a matrix from tuples
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// CALLED BY: GB_build, GB_wait, and GB_transpose
// CALLS:     Generated/GB_red_build__* workers

// This function is called by GB_build to build a matrix T for GrB_Matrix_build
// or GrB_Vector_build, by GB_wait to build a matrix T from the list of pending
// tuples, and by GB_transpose to transpose a matrix or vector.  Duplicates can
// appear if called by GB_build or GB_wait, but not GB_transpose.

// The indices are provided either as (I_input,J_input) or (I_work,J_work), not
// both.  The values are provided as S_input or S_work, not both.  On return,
// the *work arrays are either transplanted into T, or freed, since they are
// temporary workspaces.

// The work is done in major 5 Steps, some of which can be skipped, depending
// on how the tuples are provided (*_work or *_input), and whether or not they
// are sorted, or have duplicates.  If vdim <= 1, some work is skipped (for
// GrB_Vectors, and single-vector GrB_Matrices).  Let e be the # of tuples on
// input.  Let p be the # of threads used.

// STEP 1: copy user input.  O(e/p) read/write per thread, or skipped.

// STEP 2: sort the tuples.  Time: O((e log e)/p), read/write, or skipped if
//         the tuples are already sorted.

// STEP 3: count vectors and duplicates.  O(e/p) reads, per thread, if no
//         duplicates, or skipped if already done.  O(e/p) read/writes
//         per thread if duplicates appear.

// STEP 4: construct T->h and T->p.  O(e/p) reads per thread, or skipped if
//         T is a vector.

// STEP 5: assemble the tuples.
O(e/p) read/writes per thread, or O(1) if the // values can be transplanted into T as-is. // For GrB_Matrix_build: If the input (I_input, J_input, S_input) is already // sorted with no duplicates, and no typecasting needs to be done, then Step 1 // still must be done (each thread does O(e/p) reads of (I_input,J_input) and // writes to I_work), but Step 1 also does the work for Step 3. Step 2 and 3 // are skipped. Step 4 does O(e/p) reads per thread (J_input only). Then // I_work is transplanted into T->i. Step 5 does O(e/p) read/writes per thread // to copy S into T->x. // For GrB_Vector_build: as GrB_Matrix_build, Step 1 does O(e/p) read/writes // per thread. The input is always a vector, so vdim == 1 always holds. Step // 2 is skipped if the indices are already sorted, and Step 3 does no work at // all unless duplicates appear. Step 4 takes no time, for any vector. Step 5 // does O(e/p) reads/writes per thread. // For GrB_reduce_to_vector: like GrB_Vector_build, but many duplicates are // likely, and the indices will not be sorted. The input is always a single // vector (vdim == 1). Step 1 only does a parallel memcpy, from I_input to // I_work. Step 2 takes O((e log e)/p) time to sort the (i,k) tuples. Step 3 // does O(e/p) read/writes. Step 4 takes no time. Step 5 does O(e/p) // read/writes per thread. // For GB_wait: the pending tuples are provided as I_work, J_work, and S_work, // so Step 1 is skipped (no need to check for invalid indices). The input // J_work may be null (vdim can be anything, since GB_wait is used for both // vectors and matrices). The tuples might be in sorted order already, which // is known precisely known from A->Pending->sorted. Step 2 does O((e log e)/p) // work to sort the tuples. Duplicates may appear, and out-of-order tuples are // likely. Step 3 does O(e/p) read/writes. Step 4 does O(e/p) reads per // thread of (I_work,J_work), or just I_work. 
Step 5 does O(e/p) read/writes // per thread, or O(1) time if S_work can be transplanted into T->x. // For GB_transpose: uses I_work, J_work, and either S_input (if no op applied // to the values) or S_work (if an op was applied to the A->x values). This is // only done for matrices, not vectors, so vdim > 1 will always hold. The // indices are valid so Step 1 is skipped. The tuples are not sorted, so Step // 2 takes O((e log e)/p) time to do the sort. There are no duplicates, so // Step 3 only does O(e/p) reads of J_work to count the vectors in each slice. // Step 4 only does O(e/p) reads of J_work to compute T->h and T->p. Step 5 // does O(e/p) read/writes per thread, but it uses the simpler case in // GB_reduce_build_template since no duplicates can appear. It is unlikely // able to transplant S_work into T->x since the input will almost always be // unsorted. #include "GB_build.h" #include "GB_sort.h" #ifndef GBCOMPACT #include "GB_red__include.h" #endif #define GB_I_WORK(t) (((t) < 0) ? -1 : I_work [t]) #define GB_J_WORK(t) (((t) < 0) ? -1 : ((J_work == NULL) ? 0 : J_work [t])) #define GB_K_WORK(t) (((t) < 0) ? -1 : ((K_work == NULL) ? 
t : K_work [t])) #define GB_FREE_WORK \ { \ GB_FREE_MEMORY (*I_work_handle, ijslen, sizeof (int64_t)) ; \ GB_FREE_MEMORY (*J_work_handle, ijslen, sizeof (int64_t)) ; \ GB_FREE_MEMORY (*S_work_handle, ijslen, ssize) ; \ GB_FREE_MEMORY (K_work, nvals, sizeof (int64_t)) ; \ GB_FREE_MEMORY (W0, nvals, sizeof (int64_t)) ; \ GB_FREE_MEMORY (W1, nvals, sizeof (int64_t)) ; \ GB_FREE_MEMORY (W1, nvals, sizeof (int64_t)) ; \ } //------------------------------------------------------------------------------ // GB_builder //------------------------------------------------------------------------------ GrB_Info GB_builder // build a matrix from tuples ( GrB_Matrix *Thandle, // matrix T to build const GrB_Type ttype, // type of output matrix T const int64_t vlen, // length of each vector of T const int64_t vdim, // number of vectors in T const bool is_csc, // true if T is CSC, false if CSR int64_t **I_work_handle, // for (i,k) or (j,i,k) tuples int64_t **J_work_handle, // for (j,i,k) tuples GB_void **S_work_handle, // array of values of tuples, size ijslen bool known_sorted, // true if tuples known to be sorted bool known_no_duplicates, // true if tuples known to not have dupl int64_t ijslen, // size of I_work and J_work arrays const bool is_matrix, // true if T a GrB_Matrix, false if vector const bool ijcheck, // true if I_input,J_input must be checked const int64_t *restrict I_input,// original indices, size nvals const int64_t *restrict J_input,// original indices, size nvals const GB_void *restrict S_input,// array of values of tuples, size nvals const int64_t nvals, // number of tuples, and size of K_work const GrB_BinaryOp dup, // binary function to assemble duplicates, // if NULL use the SECOND operator to // keep the most recent duplicate. 
const GB_Type_code scode, // GB_Type_code of S_work or S_input array GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (Thandle != NULL) ; ASSERT (nvals >= 0) ; ASSERT (scode <= GB_UDT_code) ; ASSERT_OK (GB_check (ttype, "ttype for builder", GB0)) ; ASSERT_OK_OR_NULL (GB_check (dup, "dup for builder", GB0)) ; ASSERT (I_work_handle != NULL) ; ASSERT (J_work_handle != NULL) ; ASSERT (S_work_handle != NULL) ; //-------------------------------------------------------------------------- // get S //-------------------------------------------------------------------------- GB_void *restrict S_work = (*S_work_handle) ; const GB_void *restrict S = (S_work == NULL) ? S_input : S_work ; size_t tsize = ttype->size ; size_t ssize = GB_code_size (scode, tsize) ; ASSERT (S != NULL) ; //========================================================================== // symbolic phase of the build ============================================= //========================================================================== // The symbolic phase sorts the tuples and finds any duplicates. The // output matrix T is constructed (not including T->i and T->x), and T->h // and T->p are computed. Then I_work is transplanted into T->i, or T->i is // allocated. T->x is then allocated. It is not computed until the // numeric phase. // When this function returns, I_work is either freed or transplanted into // T->i. J_work is freed, and the I_work and J_work pointers (in the // caller) are set to NULL by setting their handles to NULL. Note that // J_work may already be NULL on input, if T has one or zero vectors // (J_work_handle is always non-NULL however). 
GrB_Info info ; GrB_Matrix T = NULL ; (*Thandle) = NULL ; int64_t *restrict I_work = (*I_work_handle) ; int64_t *restrict J_work = (*J_work_handle) ; int64_t *restrict K_work = NULL ; int64_t *restrict W0 = NULL ; int64_t *restrict W1 = NULL ; int64_t *restrict W2 = NULL ; //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (nvals, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // partition the tuples for the threads //-------------------------------------------------------------------------- // Thread tid handles tuples tstart_slice [tid] to tstart_slice [tid+1]-1. // Each thread handles about the same number of tuples. This partition // depends only on nvals. int64_t tstart_slice [nthreads+1] ; // first tuple in each slice tstart_slice [0] = 0 ; for (int tid = 1 ; tid < nthreads ; tid++) { tstart_slice [tid] = GB_PART (tid, nvals, nthreads) ; } tstart_slice [nthreads] = nvals ; // tnvec_slice [tid]: # of vectors that start in a slice. If a vector // starts in one slice and ends in another, it is // counted as being in the first slice. // tnz_slice [tid]: # of entries in a slice after removing duplicates int64_t tnvec_slice [nthreads+1] ; int64_t tnz_slice [nthreads+1] ; // sentinel values for the final cumulative sum tnvec_slice [nthreads] = 0 ; tnz_slice [nthreads] = 0 ; // this becomes true if the first pass computes tnvec_slice and tnz_slice, // and if the (I_input,J_input) tuples were found to be already sorted with // no duplicates present. 
bool tnvec_and_tnz_slice_computed = false ; //-------------------------------------------------------------------------- // STEP 1: copy user input and check if valid //-------------------------------------------------------------------------- // If the indices are provided by (I_input,J_input), then import them into // (I_work,J_work) and check if they are valid, and sorted. If the input // happens to be already sorted, then duplicates are detected and the # of // vectors in each slice is counted. if (I_work == NULL) { //---------------------------------------------------------------------- // allocate I_work //---------------------------------------------------------------------- // allocate workspace to load and sort the index tuples: // vdim <= 1: I_work and K_work for (i,k) tuples, where i = I_input [k] // vdim > 1: also J_work for (j,i,k) tuples where i = I_input [k] and // j = J_input [k]. If the tuples are found to be already sorted on // input, then J_work is not allocated, and J_input is used instead. // The k value in the tuple gives the position in the original set of // tuples: I_input [k] and S [k] when vdim <= 1, and also J_input [k] // for matrices with vdim > 1. // The workspace I_work and J_work are allocated here but freed (or // transplanted) inside GB_builder. K_work is allocated, used, and // freed in GB_builder. 
ASSERT (J_work == NULL) ; GB_MALLOC_MEMORY (I_work, nvals, sizeof (int64_t)) ; (*I_work_handle) = I_work ; ijslen = nvals ; if (I_work == NULL) { // out of memory GB_FREE_WORK ; return (GB_OUT_OF_MEMORY) ; } //---------------------------------------------------------------------- // create the tuples to sort, and check for any invalid indices //---------------------------------------------------------------------- known_sorted = true ; bool no_duplicates_found = true ; if (nvals == 0) { //------------------------------------------------------------------ // nothing to do //------------------------------------------------------------------ ; } else if (is_matrix) { //------------------------------------------------------------------ // C is a matrix; check both I_input and J_input //------------------------------------------------------------------ ASSERT (J_input != NULL) ; ASSERT (I_work != NULL) ; ASSERT (vdim >= 0) ; ASSERT (I_input != NULL) ; int64_t kbad [nthreads] ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(&&:known_sorted) reduction(&&:no_duplicates_found) for (int tid = 0 ; tid < nthreads ; tid++) { kbad [tid] = -1 ; int64_t my_tnvec = 0 ; int64_t kstart = tstart_slice [tid] ; int64_t kend = tstart_slice [tid+1] ; int64_t ilast = (kstart == 0) ? -1 : I_input [kstart-1] ; int64_t jlast = (kstart == 0) ? -1 : J_input [kstart-1] ; for (int64_t k = kstart ; k < kend ; k++) { // get k-th index from user input: (i,j) int64_t i = I_input [k] ; int64_t j = J_input [k] ; if (i < 0 || i >= vlen || j < 0 || j >= vdim) { // halt if out of bounds kbad [tid] = k ; break ; } // check if the tuples are already sorted known_sorted = known_sorted && ((jlast < j) || (jlast == j && ilast <= i)) ; // check if this entry is a duplicate of the one before it no_duplicates_found = no_duplicates_found && (!(jlast == j && ilast == i)) ; // copy the tuple into I_work. J_work is done later. 
I_work [k] = i ; if (j > jlast) { // vector j starts in this slice (but this is // valid only if J_input is sorted on input) my_tnvec++ ; } // log the last index seen ilast = i ; jlast = j ; } // these are valid only if I_input and J_input are sorted on // input, with no duplicates present. tnvec_slice [tid] = my_tnvec ; tnz_slice [tid] = kend - kstart ; } // collect the report from each thread for (int tid = 0 ; tid < nthreads ; tid++) { if (kbad [tid] >= 0) { // invalid index GB_FREE_WORK ; int64_t i = I_input [kbad [tid]] ; int64_t j = J_input [kbad [tid]] ; int64_t row = is_csc ? i : j ; int64_t col = is_csc ? j : i ; int64_t nrows = is_csc ? vlen : vdim ; int64_t ncols = is_csc ? vdim : vlen ; return (GB_ERROR (GrB_INDEX_OUT_OF_BOUNDS, (GB_LOG, "index ("GBd","GBd") out of bounds," " must be < ("GBd", "GBd")", row, col, nrows, ncols))) ; } } // if the tuples were found to be already in sorted order, and if // no duplicates were found, then tnvec_slice and tnz_slice are now // valid, Otherwise, they can only be computed after sorting. tnvec_and_tnz_slice_computed = known_sorted && no_duplicates_found ; //------------------------------------------------------------------ // allocate J_work, if needed //------------------------------------------------------------------ if (vdim > 1 && !known_sorted) { // copy J_input into J_work, so the tuples can be sorted GB_MALLOC_MEMORY (J_work, nvals, sizeof (int64_t)) ; (*J_work_handle) = J_work ; if (J_work == NULL) { // out of memory GB_FREE_WORK ; return (GB_OUT_OF_MEMORY) ; } GB_memcpy (J_work, J_input, nvals * sizeof (int64_t), nthreads); } else { // J_work is a shallow copy of J_input. The pointer is not // copied into (*J_work_handle), so it will not be freed. // J_input is not modified, even though it is typecast to the // int64_t *J_work, since J_work is not modified in this case. 
J_work = (int64_t *) J_input ; } } else if (ijcheck) { //------------------------------------------------------------------ // C is a typecasted GrB_Vector; check only I_input //------------------------------------------------------------------ ASSERT (I_input != NULL) ; ASSERT (J_input == NULL) ; ASSERT (vdim == 1) ; int64_t kbad [nthreads] ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(&&:known_sorted) reduction(&&:no_duplicates_found) for (int tid = 0 ; tid < nthreads ; tid++) { kbad [tid] = -1 ; int64_t kstart = tstart_slice [tid] ; int64_t kend = tstart_slice [tid+1] ; int64_t ilast = (kstart == 0) ? -1 : I_input [kstart-1] ; for (int64_t k = kstart ; k < kend ; k++) { // get k-th index from user input: (i) int64_t i = I_input [k] ; if (i < 0 || i >= vlen) { // halt if out of bounds kbad [tid] = k ; break ; } // check if the tuples are already sorted known_sorted = known_sorted && (ilast <= i) ; // check if this entry is a duplicate of the one before it no_duplicates_found = no_duplicates_found && (!(ilast == i)) ; // copy the tuple into the work arrays to be sorted I_work [k] = i ; // log the last index seen ilast = i ; } } // collect the report from each thread for (int tid = 0 ; tid < nthreads ; tid++) { if (kbad [tid] >= 0) { // invalid index GB_FREE_WORK ; int64_t i = I_input [kbad [tid]] ; return (GB_ERROR (GrB_INDEX_OUT_OF_BOUNDS, (GB_LOG, "index ("GBd") out of bounds, must be < ("GBd")", i, vlen))) ; } } } else { //------------------------------------------------------------------ // GB_reduce_to_vector: do not check I_input, assume not sorted //------------------------------------------------------------------ // Many duplicates are possible, since the tuples are being used to // construct a single vector. For a CSC format, each entry A(i,j) // becomes an (i,aij) tuple, with the vector index j discarded. All // entries in a single row i are reduced to a single entry in the // vector. 
The input is unlikely to be sorted, so do not bother to // check. GB_memcpy (I_work, I_input, nvals * sizeof (int64_t), nthreads) ; known_sorted = false ; } //---------------------------------------------------------------------- // determine if duplicates are possible //---------------------------------------------------------------------- // The input is now known to be sorted, or not. If it is sorted, and // if no duplicates were found, then it is known to have no duplicates. // Otherwise, duplicates might appear, but a sort is required first to // check for duplicates. known_no_duplicates = known_sorted && no_duplicates_found ; } //-------------------------------------------------------------------------- // STEP 2: sort the tuples in ascending order //-------------------------------------------------------------------------- // If the tuples are known to already be sorted, Step 2 is skipped. In // that case, K_work is NULL (not allocated), which implicitly means that // K_work [k] = k for all k = 0:nvals-1. if (!known_sorted) { // create the k part of each tuple GB_MALLOC_MEMORY (K_work, nvals, sizeof (int64_t)) ; if (K_work == NULL) { // out of memory GB_FREE_WORK ; return (GB_OUT_OF_MEMORY) ; } // The k part of each tuple (i,k) or (j,i,k) records the original // position of the tuple in the input list. This allows an unstable // sorting algorith to be used. Since k is unique, it forces the // result of the sort to be stable regardless of whether or not the // sorting algorithm is stable. It also keeps track of where the // numerical value of the tuple can be found; it is in S[k] for the // tuple (i,k) or (j,i,k), regardless of where the tuple appears in the // list after it is sorted. 
#pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t k = 0 ; k < nvals ; k++) { K_work [k] = k ; } // sort all the tuples if (vdim > 1) { //------------------------------------------------------------------ // sort a set of (j,i,k) tuples //------------------------------------------------------------------ if (nthreads == 1) { //-------------------------------------------------------------- // sequential quicksort //-------------------------------------------------------------- GB_qsort_3 (J_work, I_work, K_work, nvals) ; } else { //-------------------------------------------------------------- // parallel mergesort //-------------------------------------------------------------- GB_MALLOC_MEMORY (W0, nvals, sizeof (int64_t)) ; GB_MALLOC_MEMORY (W1, nvals, sizeof (int64_t)) ; GB_MALLOC_MEMORY (W2, nvals, sizeof (int64_t)) ; if (W0 == NULL || W1 == NULL || W2 == NULL) { // out of memory GB_FREE_WORK ; return (GB_OUT_OF_MEMORY) ; } GB_msort_3 (J_work, I_work, K_work, W0, W1, W2, nvals, nthreads) ; GB_FREE_MEMORY (W0, nvals, sizeof (int64_t)) ; GB_FREE_MEMORY (W1, nvals, sizeof (int64_t)) ; GB_FREE_MEMORY (W2, nvals, sizeof (int64_t)) ; } } else { //------------------------------------------------------------------ // sort a set of (i,k) tuples //------------------------------------------------------------------ if (nthreads == 1) { //-------------------------------------------------------------- // sequential quicksort //-------------------------------------------------------------- GB_qsort_2 (I_work, K_work, nvals) ; } else { //-------------------------------------------------------------- // parallel mergesort //-------------------------------------------------------------- GB_MALLOC_MEMORY (W0, nvals, sizeof (int64_t)) ; GB_MALLOC_MEMORY (W1, nvals, sizeof (int64_t)) ; if (W0 == NULL || W1 == NULL) { // out of memory GB_FREE_WORK ; return (GB_OUT_OF_MEMORY) ; } GB_msort_2 (I_work, K_work, W0, W1, nvals, nthreads) ; GB_FREE_MEMORY (W0, 
nvals, sizeof (int64_t)) ; GB_FREE_MEMORY (W1, nvals, sizeof (int64_t)) ; } } } //-------------------------------------------------------------------------- // STEP 3: count vectors and duplicates in each slice //-------------------------------------------------------------------------- // Duplicates are located, counted and their indices negated. The # of // vectors in each slice is counted. If the indices are known to not have // duplicates, then only the vectors are counted. Counting the # of // vectors is skipped if already done by Step 1. if (known_no_duplicates) { //---------------------------------------------------------------------- // no duplicates: just count # vectors in each slice //---------------------------------------------------------------------- // This is much faster, particularly if the # of vectors in each slice // has already been computed. #ifdef GB_DEBUG { // assert that there are no duplicates int64_t ilast = -1, jlast = -1 ; for (int64_t t = 0 ; t < nvals ; t++) { int64_t i = GB_I_WORK (t), j = GB_J_WORK (t) ; bool is_duplicate = (i == ilast && j == jlast) ; ASSERT (!is_duplicate) ; ilast = i ; jlast = j ; } } #endif if (vdim <= 1) { // all tuples appear in at most one vector, and there are no // duplicates, so there is no need to scan I_work or J_work. for (int tid = 0 ; tid < nthreads ; tid++) { int64_t tstart = tstart_slice [tid] ; int64_t tend = tstart_slice [tid+1] ; tnvec_slice [tid] = 0 ; tnz_slice [tid] = tend - tstart ; } tnvec_slice [0] = (nvals == 0) ? 0 : 1 ; } else { // count the # of unique vector indices in J_work. No need to scan // I_work since there are no duplicates to be found. Also no need // to compute them if already found in Step 1. 
if (!tnvec_and_tnz_slice_computed) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0 ; tid < nthreads ; tid++) { int64_t my_tnvec = 0 ; int64_t tstart = tstart_slice [tid] ; int64_t tend = tstart_slice [tid+1] ; int64_t jlast = GB_J_WORK (tstart-1) ; for (int64_t t = tstart ; t < tend ; t++) { // get the t-th tuple int64_t j = J_work [t] ; if (j > jlast) { // vector j starts in this slice my_tnvec++ ; jlast = j ; } } tnvec_slice [tid] = my_tnvec ; tnz_slice [tid] = tend - tstart ; } } } } else { //---------------------------------------------------------------------- // look for duplicates and count # vectors in each slice //---------------------------------------------------------------------- int64_t ilast_slice [nthreads] ; for (int tid = 0 ; tid < nthreads ; tid++) { int64_t tstart = tstart_slice [tid] ; ilast_slice [tid] = GB_I_WORK (tstart-1) ; } #pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0 ; tid < nthreads ; tid++) { int64_t my_tnvec = 0 ; int64_t my_ndupl = 0 ; int64_t tstart = tstart_slice [tid] ; int64_t tend = tstart_slice [tid+1] ; int64_t ilast = ilast_slice [tid] ; int64_t jlast = GB_J_WORK (tstart-1) ; for (int64_t t = tstart ; t < tend ; t++) { // get the t-th tuple int64_t i = I_work [t] ; int64_t j = GB_J_WORK (t) ; // tuples are now sorted but there may be duplicates ASSERT ((jlast < j) || (jlast == j && ilast <= i)) ; // check if (j,i,k) is a duplicate if (i == ilast && j == jlast) { // flag the tuple as a duplicate I_work [t] = -1 ; my_ndupl++ ; // the sort places earlier duplicate tuples (with smaller // k) after later ones (with larger k). 
ASSERT (GB_K_WORK (t-1) < GB_K_WORK (t)) ; } else { // this is a new tuple if (j > jlast) { // vector j starts in this slice my_tnvec++ ; jlast = j ; } ilast = i ; } } tnvec_slice [tid] = my_tnvec ; tnz_slice [tid] = (tend - tstart) - my_ndupl ; } } //-------------------------------------------------------------------------- // find total # of vectors and duplicates in all tuples //-------------------------------------------------------------------------- // Replace tnvec_slice with its cumulative sum, after which each slice tid // will be responsible for the # vectors in T that range from tnvec_slice // [tid] to tnvec_slice [tid+1]-1. GB_cumsum (tnvec_slice, nthreads, NULL, 1) ; int64_t tnvec = tnvec_slice [nthreads] ; // Replace tnz_slice with its cumulative sum GB_cumsum (tnz_slice, nthreads, NULL, 1) ; // find the total # of final entries, after assembling duplicates int64_t tnz = tnz_slice [nthreads] ; int64_t ndupl = nvals - tnz ; //-------------------------------------------------------------------------- // allocate T; always hypersparse //-------------------------------------------------------------------------- // [ allocate T; allocate T->p and T->h but do not initialize them. // T is always hypersparse. GB_NEW (&T, ttype, vlen, vdim, GB_Ap_malloc, is_csc, GB_FORCE_HYPER, GB_ALWAYS_HYPER, tnvec, Context) ; if (info != GrB_SUCCESS) { // out of memory GB_FREE_WORK ; return (info) ; } ASSERT (T->is_hyper) ; ASSERT (T->nzmax == 0) ; // T->i and T->x not yet allocated ASSERT (T->x == NULL) ; ASSERT (T->i == NULL) ; (*Thandle) = T ; //-------------------------------------------------------------------------- // STEP 4: construct the vector pointers and hyperlist for T //-------------------------------------------------------------------------- // Step 4 scans the J_work indices and constructs T->h and T->p. 
int64_t *restrict Th = T->h ; int64_t *restrict Tp = T->p ; if (vdim <= 1) { //---------------------------------------------------------------------- // special case for vectors //---------------------------------------------------------------------- ASSERT (tnvec == 0 || tnvec == 1) ; if (tnvec > 0) { Th [0] = 0 ; Tp [0] = 0 ; } } else if (ndupl == 0) { //---------------------------------------------------------------------- // no duplicates appear //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0 ; tid < nthreads ; tid++) { int64_t my_tnvec = tnvec_slice [tid] ; int64_t tstart = tstart_slice [tid] ; int64_t tend = tstart_slice [tid+1] ; int64_t jlast = GB_J_WORK (tstart-1) ; for (int64_t t = tstart ; t < tend ; t++) { // get the t-th tuple int64_t j = GB_J_WORK (t) ; if (j > jlast) { // vector j starts in this slice Th [my_tnvec] = j ; Tp [my_tnvec] = t ; my_tnvec++ ; jlast = j ; } } } } else { //---------------------------------------------------------------------- // it is known that at least one duplicate appears //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0 ; tid < nthreads ; tid++) { int64_t my_tnz = tnz_slice [tid] ; int64_t my_tnvec = tnvec_slice [tid] ; int64_t tstart = tstart_slice [tid] ; int64_t tend = tstart_slice [tid+1] ; int64_t jlast = GB_J_WORK (tstart-1) ; for (int64_t t = tstart ; t < tend ; t++) { // get the t-th tuple int64_t i = I_work [t] ; int64_t j = GB_J_WORK (t) ; if (i >= 0) { // this is a new tuple if (j > jlast) { // vector j starts in this slice Th [my_tnvec] = j ; Tp [my_tnvec] = my_tnz ; my_tnvec++ ; jlast = j ; } my_tnz++ ; } } } } // log the end of the last vector T->nvec_nonempty = tnvec ; T->nvec = tnvec ; Tp [tnvec] = tnz ; ASSERT (T->nvec == T->plen) ; T->magic = GB_MAGIC ; // T->p and T->h are now valid ] 
//-------------------------------------------------------------------------- // free J_work if it exists //-------------------------------------------------------------------------- ASSERT (J_work_handle != NULL) ; GB_FREE_MEMORY (*J_work_handle, ijslen, sizeof (int64_t)) ; J_work = NULL ; //-------------------------------------------------------------------------- // allocate T->i //-------------------------------------------------------------------------- T->nzmax = GB_IMAX (tnz, 1) ; if (ndupl == 0) { // shrink I_work from size ijslen to size T->nzmax if (T->nzmax < ijslen) { // this cannot fail since the size is shrinking. bool ok ; GB_REALLOC_MEMORY (I_work, T->nzmax, ijslen, sizeof (int64_t), &ok); ASSERT (ok) ; } // transplant I_work into T->i T->i = I_work ; I_work = NULL ; (*I_work_handle) = NULL ; } else { // duplicates exist, so allocate a new T->i. I_work must be freed later GB_MALLOC_MEMORY (T->i, tnz, sizeof (int64_t)) ; if (T->i == NULL) { // out of memory GB_MATRIX_FREE (&T) ; GB_FREE_WORK ; return (GB_OUT_OF_MEMORY) ; } } int64_t *restrict Ti = T->i ; //========================================================================== // numerical phase of the build: assemble any duplicates //========================================================================== // The tuples have been sorted. Assemble any duplicates with a switch // factory of built-in workers, or four generic workers. The vector // pointers T->p and hyperlist T->h (if hypersparse) have already been // computed. // If there are no duplicates, T->i holds the row indices of the tuple. // Otherwise, the row indices are still in I_work. K_work holds the // positions of each tuple in the array S. The tuples are sorted so that // duplicates are adjacent to each other and they appear in the order they // appeared in the original tuples. This method assembles the duplicates // and computes T->i and T->x from I_work, K_work, and S. into T, becoming // T->i. 
If no duplicates appear, T->i is already computed, and S just // needs to be copied and permuted into T->x. // The (i,k,S[k]) tuples are held in two integer arrays: (1) I_work or T->i, // and (2) K_work, and an array S of numerical values. S has not been // sorted, nor even accessed yet. It is identical to the original unsorted // tuples. The (i,k,S[k]) tuple holds the row index i, the position k, and // the value S [k]. This entry becomes T(i,j) = S [k] in the matrix T, and // duplicates (if any) are assembled via the dup operator. //-------------------------------------------------------------------------- // get opcodes and check types //-------------------------------------------------------------------------- // With GB_build, there can be 1 to 2 different types. // T->type is identical to the types of x,y,z for z=dup(x,y). // dup is never NULL and all its three types are the same // The type of S (scode) can different but must be compatible // with T->type // With GB_wait, there can be 1 to 5 different types: // The pending tuples are in S, of type scode which must be // compatible with dup->ytype and T->type // z = dup (x,y): can be NULL or have 1 to 3 different types // T->type: must be compatible with all above types. // dup may be NULL, in which case it is assumed be the implicit SECOND // operator, with all three types equal to T->type GrB_Type xtype, ytype, ztype ; GxB_binary_function fdup ; #ifndef GBCOMPACT GB_Opcode opcode ; #endif GB_Type_code tcode = ttype->code ; bool op_2nd ; ASSERT_OK (GB_check (ttype, "ttype for build_factorize", GB0)) ; if (dup == NULL) { //---------------------------------------------------------------------- // dup is the implicit SECOND operator //---------------------------------------------------------------------- // z = SECOND (x,y) where all three types are the same as ttype // T(i,j) = (ttype) S(k) will be done for all tuples. 
#ifndef GBCOMPACT opcode = GB_SECOND_opcode ; #endif ASSERT (GB_op_is_second (dup, ttype)) ; xtype = ttype ; ytype = ttype ; ztype = ttype ; fdup = NULL ; op_2nd = true ; } else { //---------------------------------------------------------------------- // dup is an explicit operator //---------------------------------------------------------------------- // T(i,j) = (ttype) S[k] will be done for the first tuple. // for subsequent tuples: T(i,j) += S[k], via the dup operator and // typecasting: // // y = (dup->ytype) S[k] // x = (dup->xtype) T(i,j) // z = (dup->ztype) dup (x,y) // T(i,j) = (ttype) z ASSERT_OK (GB_check (dup, "dup for build_factory", GB0)) ; #ifndef GBCOMPACT opcode = dup->opcode ; #endif xtype = dup->xtype ; ytype = dup->ytype ; ztype = dup->ztype ; fdup = dup->function ; op_2nd = GB_op_is_second (dup, ttype) ; } //-------------------------------------------------------------------------- // get the sizes and codes of each type //-------------------------------------------------------------------------- GB_Type_code zcode = ztype->code ; GB_Type_code xcode = xtype->code ; GB_Type_code ycode = ytype->code ; ASSERT (GB_code_compatible (tcode, scode)) ; // T(i,j) = (ttype) S ASSERT (GB_code_compatible (ycode, scode)) ; // y = (ytype) S ASSERT (GB_Type_compatible (xtype, ttype)) ; // x = (xtype) T(i,j) ASSERT (GB_Type_compatible (ttype, ztype)) ; // T(i,j) = (ttype) z size_t zsize = ztype->size ; size_t xsize = xtype->size ; size_t ysize = ytype->size ; // so that tcode can match scode GB_Type_code tcode2 = (tcode == GB_UCT_code) ? GB_UDT_code : tcode ; GB_Type_code scode2 = (scode == GB_UCT_code) ? 
GB_UDT_code : scode ; // no typecasting if all 5 types are the same bool nocasting = (tcode2 == scode2) && (ttype == xtype) && (ttype == ytype) && (ttype == ztype) ; //-------------------------------------------------------------------------- // STEP 5: assemble the tuples //-------------------------------------------------------------------------- bool copy_S_into_T = (nocasting && known_sorted && ndupl == 0) ; if (copy_S_into_T && S_work != NULL) { //---------------------------------------------------------------------- // transplant S_work into T->x //---------------------------------------------------------------------- // No typecasting is needed, the tuples were originally in sorted // order, and no duplicates appear. All that is required is to copy S // into Tx. S can be directly transplanted into T->x since S is // provided as S_work. GB_builder must either transplant or free // S_work. The transplant can be used by GB_wait (whenever the tuples // are already sorted, with no duplicates, and no typecasting is // needed, since S_work is always A->Pending->x). This transplant can // rarely be used for GB_transpose (when op is NULL and the transposed // tuples happen to be sorted, which is unlikely). T->x = S_work ; S_work = NULL ; (*S_work_handle) = NULL ; } else { //---------------------------------------------------------------------- // allocate T->x //---------------------------------------------------------------------- GB_MALLOC_MEMORY (T->x, tnz, ttype->size) ; if (T->x == NULL) { // out of memory GB_MATRIX_FREE (&T) ; GB_FREE_WORK ; return (GB_OUT_OF_MEMORY) ; } GB_void *restrict Tx = T->x ; if (copy_S_into_T) { //------------------------------------------------------------------ // copy S into T->x //------------------------------------------------------------------ // No typecasting is needed, the tuples were originally in sorted // order, and no duplicates appear. All that is required is to // copy S into Tx. 
S cannot be transplanted into T->x since // S_work is NULL and S_input cannot be modified by GB_builder. ASSERT (S_work == NULL) ; ASSERT (S == S_input) ; GB_memcpy (Tx, S, nvals * tsize, nthreads) ; } else if (nocasting) { //------------------------------------------------------------------ // assemble the values, S, into T, no typecasting needed //------------------------------------------------------------------ // S (either S_work or S_input) must be permuted and copied into // T->x, since the tuples had to be sorted, or duplicates appear. // Any duplicates are now assembled. // There are 44 common cases of this function for built-in types // and 8 associative operators: MIN, MAX, PLUS, TIMES for 10 types // (all but boolean; and OR, AND, XOR, and EQ for boolean. // In addition, the FIRST and SECOND operators are hard-coded, for // another 22 workers, since SECOND is used by GB_wait and since // FIRST is useful for keeping the first tuple seen. It is // controlled by the GB_INCLUDE_SECOND_OPERATOR definition, so they // do not appear in GB_reduce_to_* where the FIRST and SECOND // operators are not needed. // Early exit cannot be exploited, so the terminal is ignored. 
#define GB_INCLUDE_SECOND_OPERATOR bool done = false ; #define GB_red(opname,aname) GB_red_build_ ## opname ## aname #define GB_RED_WORKER(opname,aname,atype) \ { \ info = GB_red (opname, aname) ((atype *) Tx, Ti, \ (atype *) S, nvals, ndupl, I_work, K_work, tstart_slice,\ tnz_slice, nthreads) ; \ done = (info != GrB_NO_VALUE) ; \ } \ break ; //------------------------------------------------------------------ // launch the switch factory //------------------------------------------------------------------ #ifndef GBCOMPACT // controlled by opcode and typecode GB_Type_code typecode = tcode ; #include "GB_red_factory.c" #endif //------------------------------------------------------------------ // generic worker //------------------------------------------------------------------ if (!done) { //-------------------------------------------------------------- // no typecasting, but use the fdup function pointer and memcpy //-------------------------------------------------------------- // Either the fdup operator or type of S and T are // user-defined, or fdup is not an associative operator handled // by the GB_red_factory, or some combination of these // conditions. User-defined types cannot be typecasted, so // this handles all user-defined types. 
// Tx [p] = (ttype) S [k], but with no typecasting #define GB_CAST_ARRAY_TO_ARRAY(Tx,p,S,k) \ memcpy (Tx +((p)*tsize), S +((k)*tsize), tsize) ; if (op_2nd) { //---------------------------------------------------------- // dup is the SECOND operator, with no typecasting //---------------------------------------------------------- // Tx [p] += (ttype) S [k], but 2nd op and no typecasting #define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,S,k) \ GB_CAST_ARRAY_TO_ARRAY(Tx,p,S,k) #include "GB_reduce_build_template.c" } else { //---------------------------------------------------------- // dup is another operator, with no typecasting needed //---------------------------------------------------------- // Tx [p] += (ttype) S [k], but with no typecasting #undef GB_ADD_CAST_ARRAY_TO_ARRAY #define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,S,k) \ fdup (Tx +((p)*tsize), Tx +((p)*tsize), S +((k)*tsize)); #include "GB_reduce_build_template.c" } } } else { //------------------------------------------------------------------ // assemble the values S into T, typecasting as needed //------------------------------------------------------------------ // S (either S_work or S_input) must be permuted and copied into // T->x, since the tuples had to be sorted, or duplicates appear. // Any duplicates are now assembled. Not all of the 5 types are // the same, but all of them are built-in since user-defined types // cannot be typecasted. 
GB_cast_function cast_S_to_T = GB_cast_factory (tcode, scode) ; GB_cast_function cast_S_to_Y = GB_cast_factory (ycode, scode) ; GB_cast_function cast_T_to_X = GB_cast_factory (xcode, tcode) ; GB_cast_function cast_Z_to_T = GB_cast_factory (tcode, zcode) ; ASSERT (scode <= GB_FP64_code) ; ASSERT (tcode <= GB_FP64_code) ; ASSERT (xcode <= GB_FP64_code) ; ASSERT (ycode <= GB_FP64_code) ; ASSERT (zcode <= GB_FP64_code) ; // Tx [p] = (ttype) S [k], with typecasting #undef GB_CAST_ARRAY_TO_ARRAY #define GB_CAST_ARRAY_TO_ARRAY(Tx,p,S,k) \ cast_S_to_T (Tx +((p)*tsize), S +((k)*ssize), ssize) ; if (op_2nd) { //-------------------------------------------------------------- // dup operator is the SECOND operator, with typecasting //-------------------------------------------------------------- // Tx [p] += (ttype) S [k], but 2nd op, with typecasting #undef GB_ADD_CAST_ARRAY_TO_ARRAY #define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,S,k) \ GB_CAST_ARRAY_TO_ARRAY(Tx,p,S,k) #include "GB_reduce_build_template.c" } else { //-------------------------------------------------------------- // dup is another operator, with typecasting required //-------------------------------------------------------------- // Tx [p] += S [k], with typecasting #undef GB_ADD_CAST_ARRAY_TO_ARRAY #define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,S,k) \ { \ /* ywork = (ytype) S [k] */ \ GB_void ywork [ysize] ; \ cast_S_to_Y (ywork, S +((k)*ssize), ssize) ; \ /* xwork = (xtype) Tx [p] */ \ GB_void xwork [xsize] ; \ cast_T_to_X (xwork, Tx +((p)*tsize), tsize) ; \ /* zwork = f (xwork, ywork) */ \ GB_void zwork [zsize] ; \ fdup (zwork, xwork, ywork) ; \ /* Tx [tnz-1] = (ttype) zwork */ \ cast_Z_to_T (Tx +((p)*tsize), zwork, zsize) ; \ } #include "GB_reduce_build_template.c" } } } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORK ; ASSERT_OK (GB_check (T, "T built", GB0)) ; return 
(GrB_SUCCESS) ; }
rar5_fmt_plug.c
/* RAR 5.0 cracker patch for JtR. Hacked together during May of 2013 by Dhiru * Kholia. * * http://www.rarlab.com/technote.htm * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and * it is hereby released to the general public under the * following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * $rar5$<salt_len>$<salt>$<iter_log2>$<iv>$<pswcheck_len>$<pswcheck> */ #if FMT_EXTERNS_H extern struct fmt_main fmt_rar5; #elif FMT_REGISTERS_H john_register_one(&fmt_rar5); #else #include <string.h> #include <assert.h> #include <errno.h> #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1 // tuned on core i7 #endif #endif #include "arch.h" #include "johnswap.h" #include "stdint.h" #include "sha2.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "rar5_common.h" //#define PBKDF2_HMAC_SHA256_ALSO_INCLUDE_CTX #include "pbkdf2_hmac_sha256.h" #include "memdbg.h" #define FORMAT_LABEL "RAR5" #define FORMAT_NAME "" #ifdef SIMD_COEF_32 #define ALGORITHM_NAME "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME #else #if ARCH_BITS >= 64 #define ALGORITHM_NAME "PBKDF2-SHA256 64/" ARCH_BITS_STR " " SHA2_LIB #else #define ALGORITHM_NAME "PBKDF2-SHA256 32/" ARCH_BITS_STR " " SHA2_LIB #endif #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 32 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_ALIGN sizeof(int) #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= 
omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { #ifdef SSE_GROUP_SZ_SHA256 int lens[SSE_GROUP_SZ_SHA256], i, j; unsigned char PswCheck[SIZE_PSWCHECK], PswCheckValue[SSE_GROUP_SZ_SHA256][SHA256_DIGEST_SIZE]; unsigned char *pin[SSE_GROUP_SZ_SHA256]; union { ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA256]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA256; ++i) { lens[i] = strlen(saved_key[index+i]); pin[i] = (unsigned char*)saved_key[index+i]; x.pout[i] = (ARCH_WORD_32*)PswCheckValue[i]; } pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, SIZE_SALT50, cur_salt->iterations+32, &(x.poutc), SHA256_DIGEST_SIZE, 0); // special wtf processing for (j = 0; j < SSE_GROUP_SZ_SHA256; ++j) { memset(PswCheck, 0, sizeof(PswCheck)); for (i = 0; i < SHA256_DIGEST_SIZE; i++) PswCheck[i % SIZE_PSWCHECK] ^= PswCheckValue[j][i]; memcpy((void*)crypt_out[index+j], PswCheck, SIZE_PSWCHECK); } #else unsigned char PswCheckValue[SHA256_DIGEST_SIZE]; unsigned char PswCheck[SIZE_PSWCHECK]; int i; pbkdf2_sha256((unsigned char*)saved_key[index], strlen(saved_key[index]), cur_salt->salt, SIZE_SALT50, cur_salt->iterations+32, PswCheckValue, SHA256_DIGEST_SIZE, 0); // special wtf processing memset(PswCheck, 0, sizeof(PswCheck)); for (i = 0; i < SHA256_DIGEST_SIZE; i++) PswCheck[i % SIZE_PSWCHECK] ^= PswCheckValue[i]; memcpy((void*)crypt_out[index], PswCheck, SIZE_PSWCHECK); #endif } return count; } static void rar5_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) 
saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_rar5 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, rar5_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
bt.c
/*--------------------------------------------------------------------

  NAS Parallel Benchmarks 3.0 structured OpenMP C versions - BT

  This benchmark is an OpenMP C version of the NPB BT code.

  The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran
  versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is
  performed by the UVSQ.

  Permission to use, copy, distribute and modify this software for any
  purpose with or without fee is hereby granted.
  This software is provided "as is" without express or implied warranty.

  Information on OpenMP activities at RWCP is available at:

           http://pdplab.trc.rwcp.or.jp/pdperf/Omni/

  Information on NAS Parallel Benchmarks 2.3 is available at:

           http://www.nas.nasa.gov/NAS/NPB/

--------------------------------------------------------------------*/
/*--------------------------------------------------------------------

  Authors: R. Van der Wijngaart
           T. Harris
           M. Yarrow

  OpenMP C version: S. Satoh
  3.0 structure translation: M. Popov

--------------------------------------------------------------------*/

#include "../common/npb-C.h"
#include <nautilus/shell.h>
#include "../paging_benchmark.h"

/* global variables (u, rhs, forcing, ue/buf/cuf/q scratch, grid_points,
   dt and the dx*/dy*/dz*/tx2/ty2/tz2/..con../dssp constants all live in
   header.h) */
#include "header.h"

/* function declarations */
static void add(void);
static void adi(void);
static void error_norm(double rms[5]);
static void rhs_norm(double rms[5]);
static void exact_rhs(void);
static void exact_solution(double xi, double eta, double zeta,
                           double dtemp[5]);
static void initialize(void);
static void lhsinit(void);
static void lhsx(void);
static void lhsy(void);
static void lhsz(void);
static void compute_rhs(void);
static void set_constants(void);
static void verify(int no_time_steps, char *class, boolean *verified);
static void x_solve(void);
static void x_backsubstitute(void);
static void x_solve_cell(void);
static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]);
static void matmul_sub(double ablock[5][5], double bblock[5][5],
                       double cblock[5][5]);
static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]);
static void binvrhs(double lhs[5][5], double r[5]);
static void y_solve(void);
static void y_backsubstitute(void);
static void y_solve_cell(void);
static void z_solve(void);
static void z_backsubstitute(void);
static void z_solve_cell(void);
static int program_BT(char *__buf, void* __priv);

/* Nautilus shell registration: "nas-bt" runs the benchmark directly. */
static struct shell_cmd_impl nas_bt_impl = {
  .cmd = "nas-bt",
  .help_str = "NAS parallel benchmark BT",
  .handler = program_BT,
};
nk_register_shell_cmd(nas_bt_impl);

#ifdef NAUT_CONFIG_ASPACE_PAGING
/* "nas-bt-paging" runs the same benchmark through the paging wrapper. */
int program_BT_paging(char * _buf, void *_priv){
  return paging_wrapper(_buf, _priv, &program_BT);
}

static struct shell_cmd_impl nas_bt_paging_impl = {
  .cmd = "nas-bt-paging",
  .help_str = "NAS parallel benchmark BT with paging",
  .handler = program_BT_paging,
};
nk_register_shell_cmd(nas_bt_paging_impl);
#endif

/*--------------------------------------------------------------------
      program BT
c-------------------------------------------------------------------*/
/* Benchmark driver: set up the problem, run `niter` ADI time steps,
   then verify and report timing/MFLOPS.  Returns 0 (shell handler). */
static int program_BT(char *__buf, void* __priv) {

  int niter, step, n3;
  int nthreads = 1;
  double navg, mflops;

  double tmax;
  boolean verified;
  char class;
  //FILE *fp;

/*--------------------------------------------------------------------
c      Root node reads input file (if it exists) else takes
c      defaults from parameters
c-------------------------------------------------------------------*/

  printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
         " - BT Benchmark\n\n");

  /* Input-file path disabled under Nautilus; compiled defaults are used. */
  /* fp = fopen("inputbt.data", "r"); */
  /* if (fp != NULL) { */
  /*   printf(" Reading from input file inputbt.data"); */
  /*   fscanf(fp, "%d", &niter); */
  /*   while (fgetc(fp) != '\n'); */
  /*   fscanf(fp, "%lg", &dt); */
  /*   while (fgetc(fp) != '\n'); */
  /*   fscanf(fp, "%d%d%d", */
  /*          &grid_points[0], &grid_points[1], &grid_points[2]); */
  /*   fclose(fp); */
  /* } else { */
  /*   printf(" No input file inputbt.data. Using compiled defaults\n"); */
  niter = NITER_DEFAULT;
  dt = DT_DEFAULT;
  grid_points[0] = PROBLEM_SIZE;
  grid_points[1] = PROBLEM_SIZE;
  grid_points[2] = PROBLEM_SIZE;
  //  }

  printf(" Size: %3dx%3dx%3d\n",
         grid_points[0], grid_points[1], grid_points[2]);
  printf(" Iterations: %3d dt: %10.6f\n", niter, dt);

  if (grid_points[0] > IMAX ||
      grid_points[1] > JMAX ||
      grid_points[2] > KMAX) {
    printf(" %dx%dx%d\n", grid_points[0], grid_points[1], grid_points[2]);
    printf(" Problem size too big for compiled array sizes\n");
    exit(1);
  }

  set_constants();

  initialize();

  lhsinit();

  exact_rhs();

/*--------------------------------------------------------------------
c      do one time step to touch all code, and reinitialize
c-------------------------------------------------------------------*/
  adi();
  initialize();

  timer_clear(1);
  timer_start(1);

  for (step = 1; step <= niter; step++) {
    if (step%20 == 0 || step == 1) {
      printf(" Time step %4d\n", step);
    }
    adi();
  }

#pragma omp parallel
{
#if defined(_OPENMP)
  /* NOTE(review): only the assignment is under `omp master`; the printf
     below it executes in every thread of the team — confirm intended. */
#pragma omp master
  nthreads = omp_get_num_threads();
  printf("nthreads %d\n",nthreads);
#endif /* _OPENMP */
} /* end parallel */

  timer_stop(1);
  tmax = timer_read(1);

  verify(niter, &class, &verified);

  n3 = grid_points[0]*grid_points[1]*grid_points[2];
  navg = (grid_points[0]+grid_points[1]+grid_points[2])/3.0;
  if ( tmax != 0.0 ) {
    /* Published NPB-BT operation-count model. */
    mflops = 1.0e-6*(double)niter*
      (3478.8*(double)n3-17655.7*pow2(navg)+28023.7*navg) / tmax;
  } else {
    mflops = 0.0;
  }
  c_print_results("BT", class, grid_points[0],
                  grid_points[1], grid_points[2], niter, nthreads,
                  tmax, mflops, " floating point",
                  verified, NPBVERSION,COMPILETIME, CS1, CS2, CS3, CS4, CS5,
                  CS6, "(none)");
  return 0;
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static void add(void) {

/*--------------------------------------------------------------------
c     addition of update to the vector u
c-------------------------------------------------------------------*/

  int i, j, k, m;

  /* Caller wraps this in `omp parallel` (see adi); this is the worksharing
     loop only. */
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < 5; m++) {
          u[i][j][k][m] = u[i][j][k][m] + rhs[i][j][k][m];
        }
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/* One ADI time step: RHS evaluation followed by the three factored
   line solves and the solution update, each in its own parallel region. */
static void adi(void) {
#pragma omp parallel
  compute_rhs();

#pragma omp parallel
  x_solve();

#pragma omp parallel
  y_solve();

#pragma omp parallel
  z_solve();

#pragma omp parallel
  add();
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void error_norm(double rms[5]) {

/*--------------------------------------------------------------------
c     this function computes the norm of the difference between the
c     computed solution and the exact solution
c-------------------------------------------------------------------*/

  int i, j, k, m, d;
  double xi, eta, zeta, u_exact[5], add;

  for (m = 0; m < 5; m++) {
    rms[m] = 0.0;
  }

  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k < grid_points[2]; k++) {
        zeta = (double)k * dnzm1;
        exact_solution(xi, eta, zeta, u_exact);
        for (m = 0; m < 5; m++) {
          add = u[i][j][k][m] - u_exact[m];
          rms[m] = rms[m] + add*add;
        }
      }
    }
  }

  /* RMS over the interior point count of each dimension. */
  for (m = 0; m < 5; m++) {
    for (d = 0; d <= 2; d++) {
      rms[m] = rms[m] / (double)(grid_points[d]-2);
    }
    rms[m] = sqrt(rms[m]);
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void rhs_norm(double rms[5]) {

/*--------------------------------------------------------------------
c     this function computes the RMS norm of the residual (rhs) over
c     the interior grid points
c-------------------------------------------------------------------*/

  int i, j, k, d, m;
  double add;

  for (m = 0; m < 5; m++) {
    rms[m] = 0.0;
  }

  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < 5; m++) {
          add = rhs[i][j][k][m];
          rms[m] = rms[m] + add*add;
        }
      }
    }
  }

  for (m = 0; m < 5; m++) {
    for (d = 0; d <= 2; d++) {
      rms[m] = rms[m] / (double)(grid_points[d]-2);
    }
    rms[m] = sqrt(rms[m]);
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void exact_rhs(void) {
#pragma omp parallel
{

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     compute the right hand side based on exact solution
c-------------------------------------------------------------------*/

  /* NOTE(review): ue/buf/cuf/q come from header.h and are written per
     pencil inside worksharing loops — presumably threadprivate; verify. */
  double dtemp[5], xi, eta, zeta, dtpp;
  int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;

/*--------------------------------------------------------------------
c     initialize
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
        for (m = 0; m < 5; m++) {
          forcing[i][j][k][m] = 0.0;
        }
      }
    }
  }

/*--------------------------------------------------------------------
c     xi-direction flux differences
c-------------------------------------------------------------------*/
#pragma omp for
  for (j = 1; j < grid_points[1]-1; j++) {
    eta = (double)j * dnym1;
    for (k = 1; k < grid_points[2]-1; k++) {
      zeta = (double)k * dnzm1;

      /* Build one i-pencil of the exact solution plus derived
         quantities (velocities in buf, kinetic term in cuf/q). */
      for (i = 0; i < grid_points[0]; i++) {
        xi = (double)i * dnxm1;

        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[i][m] = dtemp[m];
        }

        dtpp = 1.0 / dtemp[0];

        for (m = 1; m <= 4; m++) {
          buf[i][m] = dtpp * dtemp[m];
        }

        cuf[i] = buf[i][1] * buf[i][1];
        buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] +
          buf[i][3] * buf[i][3];
        q[i] = 0.5*(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] +
                    buf[i][3]*ue[i][3]);
      }

      /* Central flux differences plus second-difference viscosity. */
      for (i = 1; i < grid_points[0]-1; i++) {
        im1 = i-1;
        ip1 = i+1;

        forcing[i][j][k][0] = forcing[i][j][k][0] -
          tx2*(ue[ip1][1]-ue[im1][1])+
          dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);

        forcing[i][j][k][1] = forcing[i][j][k][1] -
          tx2 * ((ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-
                 (ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+
          xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+
          dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ ue[im1][1]);

        forcing[i][j][k][2] = forcing[i][j][k][2] -
          tx2 * (ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+
          xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+
          dx3tx1*( ue[ip1][2]-2.0* ue[i][2]+ ue[im1][2]);

        forcing[i][j][k][3] = forcing[i][j][k][3] -
          tx2*(ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+
          xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+
          dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);

        forcing[i][j][k][4] = forcing[i][j][k][4] -
          tx2*(buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-
               buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+
          0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+
          xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+
          xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+
          dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]);
      }

/*--------------------------------------------------------------------
c     Fourth-order dissipation (one-sided stencils near boundaries)
c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
        i = 1;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);
        i = 2;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (-4.0*ue[i-1][m] + 6.0*ue[i][m] -
           4.0*ue[i+1][m] + ue[i+2][m]);
      }

      for (m = 0; m < 5; m++) {
        for (i = 1*3; i <= grid_points[0]-3*1-1; i++) {
          forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
            (ue[i-2][m] - 4.0*ue[i-1][m] +
             6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);
        }
      }

      for (m = 0; m < 5; m++) {
        i = grid_points[0]-3;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (ue[i-2][m] - 4.0*ue[i-1][m] +
           6.0*ue[i][m] - 4.0*ue[i+1][m]);
        i = grid_points[0]-2;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);
      }
    }
  }

/*--------------------------------------------------------------------
c     eta-direction flux differences
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    xi = (double)i * dnxm1;
    for (k = 1; k < grid_points[2]-1; k++) {
      zeta = (double)k * dnzm1;

      for (j = 0; j < grid_points[1]; j++) {
        eta = (double)j * dnym1;

        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[j][m] = dtemp[m];
        }

        dtpp = 1.0/dtemp[0];

        for (m = 1; m <= 4; m++) {
          buf[j][m] = dtpp * dtemp[m];
        }

        cuf[j] = buf[j][2] * buf[j][2];
        buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] +
          buf[j][3] * buf[j][3];
        q[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] +
                    buf[j][3]*ue[j][3]);
      }

      for (j = 1; j < grid_points[1]-1; j++) {
        jm1 = j-1;
        jp1 = j+1;

        forcing[i][j][k][0] = forcing[i][j][k][0] -
          ty2*( ue[jp1][2]-ue[jm1][2] )+
          dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);

        forcing[i][j][k][1] = forcing[i][j][k][1] -
          ty2*(ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+
          yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+
          dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);

        forcing[i][j][k][2] = forcing[i][j][k][2] -
          ty2*((ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-
               (ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+
          yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+
          dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);

        forcing[i][j][k][3] = forcing[i][j][k][3] -
          ty2*(ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+
          yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+
          dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]);

        forcing[i][j][k][4] = forcing[i][j][k][4] -
          ty2*(buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-
               buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+
          0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+ buf[jm1][0])+
          yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+
          yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+
          dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);
      }

/*--------------------------------------------------------------------
c     Fourth-order dissipation
c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
        j = 1;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);
        j = 2;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (-4.0*ue[j-1][m] + 6.0*ue[j][m] -
           4.0*ue[j+1][m] + ue[j+2][m]);
      }

      for (m = 0; m < 5; m++) {
        for (j = 1*3; j <= grid_points[1]-3*1-1; j++) {
          forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
            (ue[j-2][m] - 4.0*ue[j-1][m] +
             6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);
        }
      }

      for (m = 0; m < 5; m++) {
        j = grid_points[1]-3;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (ue[j-2][m] - 4.0*ue[j-1][m] +
           6.0*ue[j][m] - 4.0*ue[j+1][m]);
        j = grid_points[1]-2;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);
      }
    }
  }

/*--------------------------------------------------------------------
c     zeta-direction flux differences
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    xi = (double)i * dnxm1;
    for (j = 1; j < grid_points[1]-1; j++) {
      eta = (double)j * dnym1;

      for (k = 0; k < grid_points[2]; k++) {
        zeta = (double)k * dnzm1;

        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[k][m] = dtemp[m];
        }

        dtpp = 1.0/dtemp[0];

        for (m = 1; m <= 4; m++) {
          buf[k][m] = dtpp * dtemp[m];
        }

        cuf[k] = buf[k][3] * buf[k][3];
        buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] +
          buf[k][2] * buf[k][2];
        q[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] +
                    buf[k][3]*ue[k][3]);
      }

      for (k = 1; k < grid_points[2]-1; k++) {
        km1 = k-1;
        kp1 = k+1;

        forcing[i][j][k][0] = forcing[i][j][k][0] -
          tz2*( ue[kp1][3]-ue[km1][3] )+
          dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);

        forcing[i][j][k][1] = forcing[i][j][k][1] -
          tz2 * (ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+
          zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+
          dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);

        forcing[i][j][k][2] = forcing[i][j][k][2] -
          tz2 * (ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+
          zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+
          dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);

        forcing[i][j][k][3] = forcing[i][j][k][3] -
          tz2 * ((ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-
                 (ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+
          zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+
          dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);

        forcing[i][j][k][4] = forcing[i][j][k][4] -
          tz2 * (buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-
                 buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+
          0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0] +buf[km1][0])+
          zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+
          zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+
          dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);
      }

/*--------------------------------------------------------------------
c     Fourth-order dissipation
c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
        k = 1;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);
        k = 2;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (-4.0*ue[k-1][m] + 6.0*ue[k][m] -
           4.0*ue[k+1][m] + ue[k+2][m]);
      }

      for (m = 0; m < 5; m++) {
        for (k = 1*3; k <= grid_points[2]-3*1-1; k++) {
          forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
            (ue[k-2][m] - 4.0*ue[k-1][m] +
             6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);
        }
      }

      for (m = 0; m < 5; m++) {
        k = grid_points[2]-3;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (ue[k-2][m] - 4.0*ue[k-1][m] +
           6.0*ue[k][m] - 4.0*ue[k+1][m]);
        k = grid_points[2]-2;
        forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
          (ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]);
      }
    }
  }

/*--------------------------------------------------------------------
c     now change the sign of the forcing function,
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < 5; m++) {
          forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m];
        }
      }
    }
  }
}
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void exact_solution(double xi, double eta, double zeta,
                           double dtemp[5]) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     this function returns the exact solution at point xi, eta, zeta
c     (a cubic polynomial in each coordinate with coefficients ce)
c-------------------------------------------------------------------*/

  int m;

  for (m = 0; m < 5; m++) {
    dtemp[m] = ce[m][0] +
      xi*(ce[m][1] + xi*(ce[m][4] + xi*(ce[m][7] + xi*ce[m][10]))) +
      eta*(ce[m][2] + eta*(ce[m][5] + eta*(ce[m][8] + eta*ce[m][11])))+
      zeta*(ce[m][3] + zeta*(ce[m][6] + zeta*(ce[m][9] +
                                              zeta*ce[m][12])));
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void initialize(void) {
#pragma omp parallel
{

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     This subroutine initializes the field variable u using
c     tri-linear transfinite interpolation of the boundary values
c-------------------------------------------------------------------*/

  int i, j, k, m, ix, iy, iz;
  double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];

/*--------------------------------------------------------------------
c  Later (in compute_rhs) we compute 1/u for every element. A few of
c  the corner elements are not used, but it convenient (and faster)
c  to compute the whole thing with a simple loop. Make sure those
c  values are nonzero by initializing the whole thing here.
c-------------------------------------------------------------------*/
  /* NOTE(review): all three loops run to IMAX — presumably IMAX ==
     JMAX == KMAX for the compiled problem size; confirm in header.h. */
#pragma omp for
  for (i = 0; i < IMAX; i++) {
    for (j = 0; j < IMAX; j++) {
      for (k = 0; k < IMAX; k++) {
        for (m = 0; m < 5; m++) {
          u[i][j][k][m] = 1.0;
        }
      }
    }
  }

/*--------------------------------------------------------------------
c     first store the "interpolated" values everywhere on the grid
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k < grid_points[2]; k++) {
        zeta = (double)k * dnzm1;

        /* Exact solution on each pair of opposing faces. */
        for (ix = 0; ix < 2; ix++) {
          exact_solution((double)ix, eta, zeta, &(Pface[ix][0][0]));
        }
        for (iy = 0; iy < 2; iy++) {
          exact_solution(xi, (double)iy , zeta, &Pface[iy][1][0]);
        }
        for (iz = 0; iz < 2; iz++) {
          exact_solution(xi, eta, (double)iz, &Pface[iz][2][0]);
        }

        /* Tri-linear transfinite interpolation of the face values. */
        for (m = 0; m < 5; m++) {
          Pxi = xi * Pface[1][0][m] +
            (1.0-xi) * Pface[0][0][m];
          Peta = eta * Pface[1][1][m] +
            (1.0-eta) * Pface[0][1][m];
          Pzeta = zeta * Pface[1][2][m] +
            (1.0-zeta) * Pface[0][2][m];

          u[i][j][k][m] = Pxi + Peta + Pzeta -
            Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
            Pxi*Peta*Pzeta;
        }
      }
    }
  }

/*--------------------------------------------------------------------
c     now store the exact values on the boundaries
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     west face
c-------------------------------------------------------------------*/
  i = 0;
  xi = 0.0;
#pragma omp for nowait
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c     east face
c-------------------------------------------------------------------*/
  i = grid_points[0]-1;
  xi = 1.0;
#pragma omp for
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c     south face
c-------------------------------------------------------------------*/
  j = 0;
  eta = 0.0;
#pragma omp for nowait
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c     north face
c-------------------------------------------------------------------*/
  j = grid_points[1]-1;
  eta = 1.0;
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c     bottom face
c-------------------------------------------------------------------*/
  k = 0;
  zeta = 0.0;
#pragma omp for nowait
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i *dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c     top face
c-------------------------------------------------------------------*/
  k = grid_points[2]-1;
  zeta = 1.0;
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
        u[i][j][k][m] = temp[m];
      }
    }
  }
}
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsinit(void) {

#pragma omp parallel
{
  int i, j, k, m, n;

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     zero the whole left hand side for starters
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
        for (m = 0; m < 5; m++) {
          for (n = 0; n < 5; n++) {
            lhs[i][j][k][0][m][n] = 0.0;
            lhs[i][j][k][1][m][n] = 0.0;
            lhs[i][j][k][2][m][n] = 0.0;
          }
        }
      }
    }
  }

/*--------------------------------------------------------------------
c     next, set all diagonal values to 1. This is overkill, but convenient
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
        for (m = 0; m < 5; m++) {
          /* identity in the middle (index 1) block only */
          lhs[i][j][k][1][m][m] = 1.0;
        }
      }
    }
  }
}
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsx(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     This function computes the left hand side in the xi-direction
c-------------------------------------------------------------------*/

  int i, j, k;

/* NOTE(review): tmp1/tmp2/tmp3 are file-scope, not locals. This orphaned
   "omp for" runs inside an enclosing parallel region, so they are presumably
   declared threadprivate elsewhere -- confirm, otherwise this is a race. */

/*--------------------------------------------------------------------
c     determine a (labeled f) and n jacobians
c-------------------------------------------------------------------*/
#pragma omp for
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      for (i = 0; i < grid_points[0]; i++) {

        tmp1 = 1.0 / u[i][j][k][0];
        tmp2 = tmp1 * tmp1;
        tmp3 = tmp1 * tmp2;

/*--------------------------------------------------------------------
c
c-------------------------------------------------------------------*/
        /* fjac: 5x5 flux (advective) Jacobian at node (i,j,k) */
        fjac[i][j][k][0][0] = 0.0;
        fjac[i][j][k][0][1] = 1.0;
        fjac[i][j][k][0][2] = 0.0;
        fjac[i][j][k][0][3] = 0.0;
        fjac[i][j][k][0][4] = 0.0;

        fjac[i][j][k][1][0] = -(u[i][j][k][1] * tmp2 * u[i][j][k][1])
          + c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1]
                         + u[i][j][k][2] * u[i][j][k][2]
                         + u[i][j][k][3] * u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][1][1] = ( 2.0 - c2 ) * ( u[i][j][k][1] / u[i][j][k][0] );
        fjac[i][j][k][1][2] = - c2 * ( u[i][j][k][2] * tmp1 );
        fjac[i][j][k][1][3] = - c2 * ( u[i][j][k][3] * tmp1 );
        fjac[i][j][k][1][4] = c2;

        fjac[i][j][k][2][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2;
        fjac[i][j][k][2][1] = u[i][j][k][2] * tmp1;
        fjac[i][j][k][2][2] = u[i][j][k][1] * tmp1;
        fjac[i][j][k][2][3] = 0.0;
        fjac[i][j][k][2][4] = 0.0;

        fjac[i][j][k][3][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][3][1] = u[i][j][k][3] * tmp1;
        fjac[i][j][k][3][2] = 0.0;
        fjac[i][j][k][3][3] = u[i][j][k][1] * tmp1;
        fjac[i][j][k][3][4] = 0.0;

        fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
                                       + u[i][j][k][2] * u[i][j][k][2]
                                       + u[i][j][k][3] * u[i][j][k][3] ) * tmp2
                                - c1 * ( u[i][j][k][4] * tmp1 ) )
          * ( u[i][j][k][1] * tmp1 );
        fjac[i][j][k][4][1] = c1 * u[i][j][k][4] * tmp1
          - 0.50 * c2 * ( 3.0*u[i][j][k][1]*u[i][j][k][1]
                          + u[i][j][k][2]*u[i][j][k][2]
                          + u[i][j][k][3]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][1] ) * tmp2;
        fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][3]*u[i][j][k][1] ) * tmp2;
        fjac[i][j][k][4][4] = c1 * ( u[i][j][k][1] * tmp1 );

        /* njac: 5x5 viscous Jacobian at node (i,j,k) */
        njac[i][j][k][0][0] = 0.0;
        njac[i][j][k][0][1] = 0.0;
        njac[i][j][k][0][2] = 0.0;
        njac[i][j][k][0][3] = 0.0;
        njac[i][j][k][0][4] = 0.0;

        njac[i][j][k][1][0] = - con43 * c3c4 * tmp2 * u[i][j][k][1];
        njac[i][j][k][1][1] =   con43 * c3c4 * tmp1;
        njac[i][j][k][1][2] = 0.0;
        njac[i][j][k][1][3] = 0.0;
        njac[i][j][k][1][4] = 0.0;

        njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
        njac[i][j][k][2][1] = 0.0;
        njac[i][j][k][2][2] = c3c4 * tmp1;
        njac[i][j][k][2][3] = 0.0;
        njac[i][j][k][2][4] = 0.0;

        njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
        njac[i][j][k][3][1] = 0.0;
        njac[i][j][k][3][2] = 0.0;
        njac[i][j][k][3][3] = c3c4 * tmp1;
        njac[i][j][k][3][4] = 0.0;

        njac[i][j][k][4][0] = - ( con43 * c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
          - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
          - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
          - c1345 * tmp2 * u[i][j][k][4];
        njac[i][j][k][4][1] = ( con43 * c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
        njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
        njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
        njac[i][j][k][4][4] = ( c1345 ) * tmp1;
      }

/*--------------------------------------------------------------------
c     now jacobians set, so form left hand side in x direction
c-------------------------------------------------------------------*/
      /* lhs[..][AA]/[BB]/[CC]: 5x5 blocks coupling node i to its
         neighbors i-1 / i / i+1 (block tridiagonal system) */
      for (i = 1; i < grid_points[0]-1; i++) {

        tmp1 = dt * tx1;
        tmp2 = dt * tx2;

        lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i-1][j][k][0][0] - tmp1 * njac[i-1][j][k][0][0] - tmp1 * dx1;
        lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i-1][j][k][0][1] - tmp1 * njac[i-1][j][k][0][1];
        lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i-1][j][k][0][2] - tmp1 * njac[i-1][j][k][0][2];
        lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i-1][j][k][0][3] - tmp1 * njac[i-1][j][k][0][3];
        lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i-1][j][k][0][4] - tmp1 * njac[i-1][j][k][0][4];

        lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i-1][j][k][1][0] - tmp1 * njac[i-1][j][k][1][0];
        lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i-1][j][k][1][1] - tmp1 * njac[i-1][j][k][1][1] - tmp1 * dx2;
        lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i-1][j][k][1][2] - tmp1 * njac[i-1][j][k][1][2];
        lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i-1][j][k][1][3] - tmp1 * njac[i-1][j][k][1][3];
        lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i-1][j][k][1][4] - tmp1 * njac[i-1][j][k][1][4];

        lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i-1][j][k][2][0] - tmp1 * njac[i-1][j][k][2][0];
        lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i-1][j][k][2][1] - tmp1 * njac[i-1][j][k][2][1];
        lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i-1][j][k][2][2] - tmp1 * njac[i-1][j][k][2][2] - tmp1 * dx3;
        lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i-1][j][k][2][3] - tmp1 * njac[i-1][j][k][2][3];
        lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i-1][j][k][2][4] - tmp1 * njac[i-1][j][k][2][4];

        lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i-1][j][k][3][0] - tmp1 * njac[i-1][j][k][3][0];
        lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i-1][j][k][3][1] - tmp1 * njac[i-1][j][k][3][1];
        lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i-1][j][k][3][2] - tmp1 * njac[i-1][j][k][3][2];
        lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i-1][j][k][3][3] - tmp1 * njac[i-1][j][k][3][3] - tmp1 * dx4;
        lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i-1][j][k][3][4] - tmp1 * njac[i-1][j][k][3][4];

        lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i-1][j][k][4][0] - tmp1 * njac[i-1][j][k][4][0];
        lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i-1][j][k][4][1] - tmp1 * njac[i-1][j][k][4][1];
        lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i-1][j][k][4][2] - tmp1 * njac[i-1][j][k][4][2];
        lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i-1][j][k][4][3] - tmp1 * njac[i-1][j][k][4][3];
        lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i-1][j][k][4][4] - tmp1 * njac[i-1][j][k][4][4] - tmp1 * dx5;

        lhs[i][j][k][BB][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dx1;
        lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
        lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
        lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
        lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];

        lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
        lhs[i][j][k][BB][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dx2;
        lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
        lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
        lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];

        lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
        lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
        lhs[i][j][k][BB][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dx3;
        lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
        lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];

        lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
        lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
        lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
        lhs[i][j][k][BB][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dx4;
        lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];

        lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
        lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
        lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
        lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
        lhs[i][j][k][BB][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dx5;

        lhs[i][j][k][CC][0][0] = tmp2 * fjac[i+1][j][k][0][0] - tmp1 * njac[i+1][j][k][0][0] - tmp1 * dx1;
        lhs[i][j][k][CC][0][1] = tmp2 * fjac[i+1][j][k][0][1] - tmp1 * njac[i+1][j][k][0][1];
        lhs[i][j][k][CC][0][2] = tmp2 * fjac[i+1][j][k][0][2] - tmp1 * njac[i+1][j][k][0][2];
        lhs[i][j][k][CC][0][3] = tmp2 * fjac[i+1][j][k][0][3] - tmp1 * njac[i+1][j][k][0][3];
        lhs[i][j][k][CC][0][4] = tmp2 * fjac[i+1][j][k][0][4] - tmp1 * njac[i+1][j][k][0][4];

        lhs[i][j][k][CC][1][0] = tmp2 * fjac[i+1][j][k][1][0] - tmp1 * njac[i+1][j][k][1][0];
        lhs[i][j][k][CC][1][1] = tmp2 * fjac[i+1][j][k][1][1] - tmp1 * njac[i+1][j][k][1][1] - tmp1 * dx2;
        lhs[i][j][k][CC][1][2] = tmp2 * fjac[i+1][j][k][1][2] - tmp1 * njac[i+1][j][k][1][2];
        lhs[i][j][k][CC][1][3] = tmp2 * fjac[i+1][j][k][1][3] - tmp1 * njac[i+1][j][k][1][3];
        lhs[i][j][k][CC][1][4] = tmp2 * fjac[i+1][j][k][1][4] - tmp1 * njac[i+1][j][k][1][4];

        lhs[i][j][k][CC][2][0] = tmp2 * fjac[i+1][j][k][2][0] - tmp1 * njac[i+1][j][k][2][0];
        lhs[i][j][k][CC][2][1] = tmp2 * fjac[i+1][j][k][2][1] - tmp1 * njac[i+1][j][k][2][1];
        lhs[i][j][k][CC][2][2] = tmp2 * fjac[i+1][j][k][2][2] - tmp1 * njac[i+1][j][k][2][2] - tmp1 * dx3;
        lhs[i][j][k][CC][2][3] = tmp2 * fjac[i+1][j][k][2][3] - tmp1 * njac[i+1][j][k][2][3];
        lhs[i][j][k][CC][2][4] = tmp2 * fjac[i+1][j][k][2][4] - tmp1 * njac[i+1][j][k][2][4];

        lhs[i][j][k][CC][3][0] = tmp2 * fjac[i+1][j][k][3][0] - tmp1 * njac[i+1][j][k][3][0];
        lhs[i][j][k][CC][3][1] = tmp2 * fjac[i+1][j][k][3][1] - tmp1 * njac[i+1][j][k][3][1];
        lhs[i][j][k][CC][3][2] = tmp2 * fjac[i+1][j][k][3][2] - tmp1 * njac[i+1][j][k][3][2];
        lhs[i][j][k][CC][3][3] = tmp2 * fjac[i+1][j][k][3][3] - tmp1 * njac[i+1][j][k][3][3] - tmp1 * dx4;
        lhs[i][j][k][CC][3][4] = tmp2 * fjac[i+1][j][k][3][4] - tmp1 * njac[i+1][j][k][3][4];

        lhs[i][j][k][CC][4][0] = tmp2 * fjac[i+1][j][k][4][0] - tmp1 * njac[i+1][j][k][4][0];
        lhs[i][j][k][CC][4][1] = tmp2 * fjac[i+1][j][k][4][1] - tmp1 * njac[i+1][j][k][4][1];
        lhs[i][j][k][CC][4][2] = tmp2 * fjac[i+1][j][k][4][2] - tmp1 * njac[i+1][j][k][4][2];
        lhs[i][j][k][CC][4][3] = tmp2 * fjac[i+1][j][k][4][3] - tmp1 * njac[i+1][j][k][4][3];
        lhs[i][j][k][CC][4][4] = tmp2 * fjac[i+1][j][k][4][4] - tmp1 * njac[i+1][j][k][4][4] - tmp1 * dx5;
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsy(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     This function computes the left hand side for the three y-factors
c-------------------------------------------------------------------*/

  int i, j, k;

/*--------------------------------------------------------------------
c     Compute the indices for storing the tri-diagonal matrix;
c     determine a (labeled f) and n jacobians for cell c
c-------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {

        tmp1 = 1.0 / u[i][j][k][0];
        tmp2 = tmp1 * tmp1;
        tmp3 = tmp1 * tmp2;

        /* fjac: 5x5 eta-direction flux Jacobian at node (i,j,k) */
        fjac[i][j][k][0][0] = 0.0;
        fjac[i][j][k][0][1] = 0.0;
        fjac[i][j][k][0][2] = 1.0;
        fjac[i][j][k][0][3] = 0.0;
        fjac[i][j][k][0][4] = 0.0;

        fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2;
        fjac[i][j][k][1][1] = u[i][j][k][2] * tmp1;
        fjac[i][j][k][1][2] = u[i][j][k][1] * tmp1;
        fjac[i][j][k][1][3] = 0.0;
        fjac[i][j][k][1][4] = 0.0;

        fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][2]*tmp2)
          + 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
                            + u[i][j][k][2] * u[i][j][k][2]
                            + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );
        fjac[i][j][k][2][1] = - c2 * u[i][j][k][1] * tmp1;
        fjac[i][j][k][2][2] = ( 2.0 - c2 ) * u[i][j][k][2] * tmp1;
        fjac[i][j][k][2][3] = - c2 * u[i][j][k][3] * tmp1;
        fjac[i][j][k][2][4] = c2;

        fjac[i][j][k][3][0] = - ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][3][1] = 0.0;
        fjac[i][j][k][3][2] = u[i][j][k][3] * tmp1;
        fjac[i][j][k][3][3] = u[i][j][k][2] * tmp1;
        fjac[i][j][k][3][4] = 0.0;

        fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
                                       + u[i][j][k][2] * u[i][j][k][2]
                                       + u[i][j][k][3] * u[i][j][k][3] ) * tmp2
                                - c1 * u[i][j][k][4] * tmp1 )
          * u[i][j][k][2] * tmp1;
        fjac[i][j][k][4][1] = - c2 * u[i][j][k][1]*u[i][j][k][2] * tmp2;
        fjac[i][j][k][4][2] = c1 * u[i][j][k][4] * tmp1
          - 0.50 * c2 * ( ( u[i][j][k][1]*u[i][j][k][1]
                            + 3.0 * u[i][j][k][2]*u[i][j][k][2]
                            + u[i][j][k][3]*u[i][j][k][3] ) * tmp2 );
        fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][4][4] = c1 * u[i][j][k][2] * tmp1;

        /* njac: 5x5 viscous Jacobian; the con43 factor sits on the
           v-momentum row/column for the eta direction */
        njac[i][j][k][0][0] = 0.0;
        njac[i][j][k][0][1] = 0.0;
        njac[i][j][k][0][2] = 0.0;
        njac[i][j][k][0][3] = 0.0;
        njac[i][j][k][0][4] = 0.0;

        njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
        njac[i][j][k][1][1] = c3c4 * tmp1;
        njac[i][j][k][1][2] = 0.0;
        njac[i][j][k][1][3] = 0.0;
        njac[i][j][k][1][4] = 0.0;

        njac[i][j][k][2][0] = - con43 * c3c4 * tmp2 * u[i][j][k][2];
        njac[i][j][k][2][1] = 0.0;
        njac[i][j][k][2][2] = con43 * c3c4 * tmp1;
        njac[i][j][k][2][3] = 0.0;
        njac[i][j][k][2][4] = 0.0;

        njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
        njac[i][j][k][3][1] = 0.0;
        njac[i][j][k][3][2] = 0.0;
        njac[i][j][k][3][3] = c3c4 * tmp1;
        njac[i][j][k][3][4] = 0.0;

        njac[i][j][k][4][0] = - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
          - ( con43 * c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
          - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
          - c1345 * tmp2 * u[i][j][k][4];
        njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
        njac[i][j][k][4][2] = ( con43 * c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
        njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
        njac[i][j][k][4][4] = ( c1345 ) * tmp1;
      }
    }
  }

/*--------------------------------------------------------------------
c     now jacobians set, so form left hand side in y direction
c-------------------------------------------------------------------*/
  /* AA/BB/CC: 5x5 blocks coupling node j to j-1 / j / j+1 */
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {

        tmp1 = dt * ty1;
        tmp2 = dt * ty2;

        lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j-1][k][0][0] - tmp1 * njac[i][j-1][k][0][0] - tmp1 * dy1;
        lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j-1][k][0][1] - tmp1 * njac[i][j-1][k][0][1];
        lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j-1][k][0][2] - tmp1 * njac[i][j-1][k][0][2];
        lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j-1][k][0][3] - tmp1 * njac[i][j-1][k][0][3];
        lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j-1][k][0][4] - tmp1 * njac[i][j-1][k][0][4];

        lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j-1][k][1][0] - tmp1 * njac[i][j-1][k][1][0];
        lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j-1][k][1][1] - tmp1 * njac[i][j-1][k][1][1] - tmp1 * dy2;
        lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j-1][k][1][2] - tmp1 * njac[i][j-1][k][1][2];
        lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j-1][k][1][3] - tmp1 * njac[i][j-1][k][1][3];
        lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j-1][k][1][4] - tmp1 * njac[i][j-1][k][1][4];

        lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j-1][k][2][0] - tmp1 * njac[i][j-1][k][2][0];
        lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j-1][k][2][1] - tmp1 * njac[i][j-1][k][2][1];
        lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j-1][k][2][2] - tmp1 * njac[i][j-1][k][2][2] - tmp1 * dy3;
        lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j-1][k][2][3] - tmp1 * njac[i][j-1][k][2][3];
        lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j-1][k][2][4] - tmp1 * njac[i][j-1][k][2][4];

        lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j-1][k][3][0] - tmp1 * njac[i][j-1][k][3][0];
        lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j-1][k][3][1] - tmp1 * njac[i][j-1][k][3][1];
        lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j-1][k][3][2] - tmp1 * njac[i][j-1][k][3][2];
        lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j-1][k][3][3] - tmp1 * njac[i][j-1][k][3][3] - tmp1 * dy4;
        lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j-1][k][3][4] - tmp1 * njac[i][j-1][k][3][4];

        lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j-1][k][4][0] - tmp1 * njac[i][j-1][k][4][0];
        lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j-1][k][4][1] - tmp1 * njac[i][j-1][k][4][1];
        lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j-1][k][4][2] - tmp1 * njac[i][j-1][k][4][2];
        lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j-1][k][4][3] - tmp1 * njac[i][j-1][k][4][3];
        lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j-1][k][4][4] - tmp1 * njac[i][j-1][k][4][4] - tmp1 * dy5;

        lhs[i][j][k][BB][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dy1;
        lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
        lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
        lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
        lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];

        lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
        lhs[i][j][k][BB][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dy2;
        lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
        lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
        lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];

        lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
        lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
        lhs[i][j][k][BB][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dy3;
        lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
        lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];

        lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
        lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
        lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
        lhs[i][j][k][BB][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dy4;
        lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];

        lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
        lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
        lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
        lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
        lhs[i][j][k][BB][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dy5;

        lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j+1][k][0][0] - tmp1 * njac[i][j+1][k][0][0] - tmp1 * dy1;
        lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j+1][k][0][1] - tmp1 * njac[i][j+1][k][0][1];
        lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j+1][k][0][2] - tmp1 * njac[i][j+1][k][0][2];
        lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j+1][k][0][3] - tmp1 * njac[i][j+1][k][0][3];
        lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j+1][k][0][4] - tmp1 * njac[i][j+1][k][0][4];

        lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j+1][k][1][0] - tmp1 * njac[i][j+1][k][1][0];
        lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j+1][k][1][1] - tmp1 * njac[i][j+1][k][1][1] - tmp1 * dy2;
        lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j+1][k][1][2] - tmp1 * njac[i][j+1][k][1][2];
        lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j+1][k][1][3] - tmp1 * njac[i][j+1][k][1][3];
        lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j+1][k][1][4] - tmp1 * njac[i][j+1][k][1][4];

        lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j+1][k][2][0] - tmp1 * njac[i][j+1][k][2][0];
        lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j+1][k][2][1] - tmp1 * njac[i][j+1][k][2][1];
        lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j+1][k][2][2] - tmp1 * njac[i][j+1][k][2][2] - tmp1 * dy3;
        lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j+1][k][2][3] - tmp1 * njac[i][j+1][k][2][3];
        lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j+1][k][2][4] - tmp1 * njac[i][j+1][k][2][4];

        lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j+1][k][3][0] - tmp1 * njac[i][j+1][k][3][0];
        lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j+1][k][3][1] - tmp1 * njac[i][j+1][k][3][1];
        lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j+1][k][3][2] - tmp1 * njac[i][j+1][k][3][2];
        lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j+1][k][3][3] - tmp1 * njac[i][j+1][k][3][3] - tmp1 * dy4;
        lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j+1][k][3][4] - tmp1 * njac[i][j+1][k][3][4];

        lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j+1][k][4][0] - tmp1 * njac[i][j+1][k][4][0];
        lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j+1][k][4][1] - tmp1 * njac[i][j+1][k][4][1];
        lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j+1][k][4][2] - tmp1 * njac[i][j+1][k][4][2];
        lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j+1][k][4][3] - tmp1 * njac[i][j+1][k][4][3];
        lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j+1][k][4][4] - tmp1 * njac[i][j+1][k][4][4] - tmp1 * dy5;
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsz(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     This function computes the left hand side for the three z-factors
c-------------------------------------------------------------------*/

  int i, j, k;

/*--------------------------------------------------------------------
c     Compute the indices for storing the block-diagonal
matrix; c determine c (labeled f) and s jacobians
c---------------------------------------------------------------------*/
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 0; k < grid_points[2]; k++) {

        tmp1 = 1.0 / u[i][j][k][0];
        tmp2 = tmp1 * tmp1;
        tmp3 = tmp1 * tmp2;

        /* fjac: 5x5 zeta-direction flux Jacobian at node (i,j,k) */
        fjac[i][j][k][0][0] = 0.0;
        fjac[i][j][k][0][1] = 0.0;
        fjac[i][j][k][0][2] = 0.0;
        fjac[i][j][k][0][3] = 1.0;
        fjac[i][j][k][0][4] = 0.0;

        fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;
        fjac[i][j][k][1][2] = 0.0;
        fjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;
        fjac[i][j][k][1][4] = 0.0;

        fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][2][1] = 0.0;
        fjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;
        fjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;
        fjac[i][j][k][2][4] = 0.0;

        fjac[i][j][k][3][0] = - (u[i][j][k][3]*u[i][j][k][3] * tmp2 )
          + 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
                            + u[i][j][k][2] * u[i][j][k][2]
                            + u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );
        fjac[i][j][k][3][1] = - c2 * u[i][j][k][1] * tmp1;
        fjac[i][j][k][3][2] = - c2 * u[i][j][k][2] * tmp1;
        fjac[i][j][k][3][3] = ( 2.0 - c2 ) * u[i][j][k][3] * tmp1;
        fjac[i][j][k][3][4] = c2;

        fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
                                       + u[i][j][k][2] * u[i][j][k][2]
                                       + u[i][j][k][3] * u[i][j][k][3] ) * tmp2
                                - c1 * ( u[i][j][k][4] * tmp1 ) )
          * ( u[i][j][k][3] * tmp1 );
        fjac[i][j][k][4][1] = - c2 * ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] ) * tmp2;
        fjac[i][j][k][4][3] = c1 * ( u[i][j][k][4] * tmp1 )
          - 0.50 * c2 * ( ( u[i][j][k][1]*u[i][j][k][1]
                            + u[i][j][k][2]*u[i][j][k][2]
                            + 3.0*u[i][j][k][3]*u[i][j][k][3] ) * tmp2 );
        fjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;

        /* njac: 5x5 viscous Jacobian; con43 factor on the w-momentum
           row/column for the zeta direction */
        njac[i][j][k][0][0] = 0.0;
        njac[i][j][k][0][1] = 0.0;
        njac[i][j][k][0][2] = 0.0;
        njac[i][j][k][0][3] = 0.0;
        njac[i][j][k][0][4] = 0.0;

        njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
        njac[i][j][k][1][1] = c3c4 * tmp1;
        njac[i][j][k][1][2] = 0.0;
        njac[i][j][k][1][3] = 0.0;
        njac[i][j][k][1][4] = 0.0;

        njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
        njac[i][j][k][2][1] = 0.0;
        njac[i][j][k][2][2] = c3c4 * tmp1;
        njac[i][j][k][2][3] = 0.0;
        njac[i][j][k][2][4] = 0.0;

        njac[i][j][k][3][0] = - con43 * c3c4 * tmp2 * u[i][j][k][3];
        njac[i][j][k][3][1] = 0.0;
        njac[i][j][k][3][2] = 0.0;
        /* NOTE(review): written "con43 * c3 * c4" here but "con43 * c3c4"
           in lhsx/lhsy; numerically identical only if c3c4 == c3*c4 --
           confirm against set_constants before normalizing. */
        njac[i][j][k][3][3] = con43 * c3 * c4 * tmp1;
        njac[i][j][k][3][4] = 0.0;

        njac[i][j][k][4][0] = - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
          - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
          - ( con43 * c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
          - c1345 * tmp2 * u[i][j][k][4];
        njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
        njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
        njac[i][j][k][4][3] = ( con43 * c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
        njac[i][j][k][4][4] = ( c1345 )* tmp1;
      }
    }
  }

/*--------------------------------------------------------------------
c     now jacobians set, so form left hand side in z direction
c-------------------------------------------------------------------*/
  /* AA/BB/CC: 5x5 blocks coupling node k to k-1 / k / k+1 */
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {

        tmp1 = dt * tz1;
        tmp2 = dt * tz2;

        lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j][k-1][0][0] - tmp1 * njac[i][j][k-1][0][0] - tmp1 * dz1;
        lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j][k-1][0][1] - tmp1 * njac[i][j][k-1][0][1];
        lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j][k-1][0][2] - tmp1 * njac[i][j][k-1][0][2];
        lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j][k-1][0][3] - tmp1 * njac[i][j][k-1][0][3];
        lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j][k-1][0][4] - tmp1 * njac[i][j][k-1][0][4];

        lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j][k-1][1][0] - tmp1 * njac[i][j][k-1][1][0];
        lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j][k-1][1][1] - tmp1 * njac[i][j][k-1][1][1] - tmp1 * dz2;
        lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j][k-1][1][2] - tmp1 * njac[i][j][k-1][1][2];
        lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j][k-1][1][3] - tmp1 * njac[i][j][k-1][1][3];
        lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j][k-1][1][4] - tmp1 * njac[i][j][k-1][1][4];

        lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j][k-1][2][0] - tmp1 * njac[i][j][k-1][2][0];
        lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j][k-1][2][1] - tmp1 * njac[i][j][k-1][2][1];
        lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j][k-1][2][2] - tmp1 * njac[i][j][k-1][2][2] - tmp1 * dz3;
        lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j][k-1][2][3] - tmp1 * njac[i][j][k-1][2][3];
        lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j][k-1][2][4] - tmp1 * njac[i][j][k-1][2][4];

        lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j][k-1][3][0] - tmp1 * njac[i][j][k-1][3][0];
        lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j][k-1][3][1] - tmp1 * njac[i][j][k-1][3][1];
        lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j][k-1][3][2] - tmp1 * njac[i][j][k-1][3][2];
        lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j][k-1][3][3] - tmp1 * njac[i][j][k-1][3][3] - tmp1 * dz4;
        lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j][k-1][3][4] - tmp1 * njac[i][j][k-1][3][4];

        lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j][k-1][4][0] - tmp1 * njac[i][j][k-1][4][0];
        lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j][k-1][4][1] - tmp1 * njac[i][j][k-1][4][1];
        lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j][k-1][4][2] - tmp1 * njac[i][j][k-1][4][2];
        lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j][k-1][4][3] - tmp1 * njac[i][j][k-1][4][3];
        lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j][k-1][4][4] - tmp1 * njac[i][j][k-1][4][4] - tmp1 * dz5;

        lhs[i][j][k][BB][0][0] = 1.0 + tmp1 * 2.0 * njac[i][j][k][0][0] + tmp1 * 2.0 * dz1;
        lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
        lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
        lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
        lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];

        lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
        lhs[i][j][k][BB][1][1] = 1.0 + tmp1 * 2.0 * njac[i][j][k][1][1] + tmp1 * 2.0 * dz2;
        lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
        lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
        lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];

        lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
        lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
        lhs[i][j][k][BB][2][2] = 1.0 + tmp1 * 2.0 * njac[i][j][k][2][2] + tmp1 * 2.0 * dz3;
        lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
        lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];

        lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
        lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
        lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
        lhs[i][j][k][BB][3][3] = 1.0 + tmp1 * 2.0 * njac[i][j][k][3][3] + tmp1 * 2.0 * dz4;
        lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];

        lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
        lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
        lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
        lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
        lhs[i][j][k][BB][4][4] = 1.0 + tmp1 * 2.0 * njac[i][j][k][4][4] + tmp1 * 2.0 * dz5;

        lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j][k+1][0][0] - tmp1 * njac[i][j][k+1][0][0] - tmp1 * dz1;
        lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j][k+1][0][1] - tmp1 * njac[i][j][k+1][0][1];
        lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j][k+1][0][2] - tmp1 * njac[i][j][k+1][0][2];
        lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j][k+1][0][3] - tmp1 * njac[i][j][k+1][0][3];
        lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j][k+1][0][4] - tmp1 * njac[i][j][k+1][0][4];

        lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j][k+1][1][0] - tmp1 * njac[i][j][k+1][1][0];
        lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j][k+1][1][1] - tmp1 * njac[i][j][k+1][1][1] - tmp1 * dz2;
        lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j][k+1][1][2] - tmp1 * njac[i][j][k+1][1][2];
        lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j][k+1][1][3] - tmp1 * njac[i][j][k+1][1][3];
        lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j][k+1][1][4] - tmp1 * njac[i][j][k+1][1][4];

        lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j][k+1][2][0] - tmp1 * njac[i][j][k+1][2][0];
        lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j][k+1][2][1] - tmp1 * njac[i][j][k+1][2][1];
        lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j][k+1][2][2] - tmp1 * njac[i][j][k+1][2][2] - tmp1 * dz3;
        lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j][k+1][2][3] - tmp1 * njac[i][j][k+1][2][3];
        lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j][k+1][2][4] - tmp1 * njac[i][j][k+1][2][4];

        lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j][k+1][3][0] - tmp1 * njac[i][j][k+1][3][0];
        lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j][k+1][3][1] - tmp1 * njac[i][j][k+1][3][1];
        lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j][k+1][3][2] - tmp1 * njac[i][j][k+1][3][2];
        lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j][k+1][3][3] - tmp1 * njac[i][j][k+1][3][3] - tmp1 * dz4;
        lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j][k+1][3][4] - tmp1 * njac[i][j][k+1][3][4];

        lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j][k+1][4][0] - tmp1 * njac[i][j][k+1][4][0];
        lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j][k+1][4][1] - tmp1 * njac[i][j][k+1][4][1];
        lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j][k+1][4][2] - tmp1 * njac[i][j][k+1][4][2];
        lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j][k+1][4][3] - tmp1 * njac[i][j][k+1][4][3];
        lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j][k+1][4][4] - tmp1 * njac[i][j][k+1][4][4] - tmp1 * dz5;
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void compute_rhs(void) {

  int i, j, k, m;
  double rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1;

/*--------------------------------------------------------------------
c     compute the reciprocal of density, and the kinetic energy,
c     and the speed of sound.
c-------------------------------------------------------------------*/ #pragma omp for nowait for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { rho_inv = 1.0/u[i][j][k][0]; rho_i[i][j][k] = rho_inv; us[i][j][k] = u[i][j][k][1] * rho_inv; vs[i][j][k] = u[i][j][k][2] * rho_inv; ws[i][j][k] = u[i][j][k][3] * rho_inv; square[i][j][k] = 0.5 * (u[i][j][k][1]*u[i][j][k][1] + u[i][j][k][2]*u[i][j][k][2] + u[i][j][k][3]*u[i][j][k][3] ) * rho_inv; qs[i][j][k] = square[i][j][k] * rho_inv; } } } /*-------------------------------------------------------------------- c copy the exact forcing term to the right hand side; because c this forcing term is known, we can store it on the whole grid c including the boundary c-------------------------------------------------------------------*/ #pragma omp for for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = forcing[i][j][k][m]; } } } } /*-------------------------------------------------------------------- c compute xi-direction fluxes c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { uijk = us[i][j][k]; up1 = us[i+1][j][k]; um1 = us[i-1][j][k]; rhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 * (u[i+1][j][k][0] - 2.0*u[i][j][k][0] + u[i-1][j][k][0]) - tx2 * (u[i+1][j][k][1] - u[i-1][j][k][1]); rhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 * (u[i+1][j][k][1] - 2.0*u[i][j][k][1] + u[i-1][j][k][1]) + xxcon2*con43 * (up1 - 2.0*uijk + um1) - tx2 * (u[i+1][j][k][1]*up1 - u[i-1][j][k][1]*um1 + (u[i+1][j][k][4]- square[i+1][j][k]- u[i-1][j][k][4]+ square[i-1][j][k])* c2); rhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 * (u[i+1][j][k][2] - 2.0*u[i][j][k][2] + u[i-1][j][k][2]) + xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] + 
vs[i-1][j][k]) - tx2 * (u[i+1][j][k][2]*up1 - u[i-1][j][k][2]*um1); rhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 * (u[i+1][j][k][3] - 2.0*u[i][j][k][3] + u[i-1][j][k][3]) + xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] + ws[i-1][j][k]) - tx2 * (u[i+1][j][k][3]*up1 - u[i-1][j][k][3]*um1); rhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 * (u[i+1][j][k][4] - 2.0*u[i][j][k][4] + u[i-1][j][k][4]) + xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] + qs[i-1][j][k]) + xxcon4 * (up1*up1 - 2.0*uijk*uijk + um1*um1) + xxcon5 * (u[i+1][j][k][4]*rho_i[i+1][j][k] - 2.0*u[i][j][k][4]*rho_i[i][j][k] + u[i-1][j][k][4]*rho_i[i-1][j][k]) - tx2 * ( (c1*u[i+1][j][k][4] - c2*square[i+1][j][k])*up1 - (c1*u[i-1][j][k][4] - c2*square[i-1][j][k])*um1 ); } } } /*-------------------------------------------------------------------- c add fourth order xi-direction dissipation c-------------------------------------------------------------------*/ i = 1; #pragma omp for nowait for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m]- dssp * ( 5.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + u[i+2][j][k][m]); } } } i = 2; #pragma omp for nowait for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + u[i+2][j][k][m]); } } } #pragma omp for nowait for (i = 3; i < grid_points[0]-3; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] + u[i+2][j][k][m] ); } } } } i = grid_points[0]-3; #pragma omp for nowait for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] + 
6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] ); } } } i = grid_points[0]-2; #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i-2][j][k][m] - 4.*u[i-1][j][k][m] + 5.0*u[i][j][k][m] ); } } } /*-------------------------------------------------------------------- c compute eta-direction fluxes c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { vijk = vs[i][j][k]; vp1 = vs[i][j+1][k]; vm1 = vs[i][j-1][k]; rhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 * (u[i][j+1][k][0] - 2.0*u[i][j][k][0] + u[i][j-1][k][0]) - ty2 * (u[i][j+1][k][2] - u[i][j-1][k][2]); rhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 * (u[i][j+1][k][1] - 2.0*u[i][j][k][1] + u[i][j-1][k][1]) + yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] + us[i][j-1][k]) - ty2 * (u[i][j+1][k][1]*vp1 - u[i][j-1][k][1]*vm1); rhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 * (u[i][j+1][k][2] - 2.0*u[i][j][k][2] + u[i][j-1][k][2]) + yycon2*con43 * (vp1 - 2.0*vijk + vm1) - ty2 * (u[i][j+1][k][2]*vp1 - u[i][j-1][k][2]*vm1 + (u[i][j+1][k][4] - square[i][j+1][k] - u[i][j-1][k][4] + square[i][j-1][k]) *c2); rhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 * (u[i][j+1][k][3] - 2.0*u[i][j][k][3] + u[i][j-1][k][3]) + yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] + ws[i][j-1][k]) - ty2 * (u[i][j+1][k][3]*vp1 - u[i][j-1][k][3]*vm1); rhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 * (u[i][j+1][k][4] - 2.0*u[i][j][k][4] + u[i][j-1][k][4]) + yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] + qs[i][j-1][k]) + yycon4 * (vp1*vp1 - 2.0*vijk*vijk + vm1*vm1) + yycon5 * (u[i][j+1][k][4]*rho_i[i][j+1][k] - 2.0*u[i][j][k][4]*rho_i[i][j][k] + u[i][j-1][k][4]*rho_i[i][j-1][k]) - ty2 * ((c1*u[i][j+1][k][4] - c2*square[i][j+1][k]) * vp1 - (c1*u[i][j-1][k][4] - c2*square[i][j-1][k]) * vm1); } } } 
/*-------------------------------------------------------------------- c add fourth order eta-direction dissipation c-------------------------------------------------------------------*/ j = 1; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m]- dssp * ( 5.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + u[i][j+2][k][m]); } } } j = 2; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + u[i][j+2][k][m]); } } } #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (j = 3; j < grid_points[1]-3; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] + u[i][j+2][k][m] ); } } } } j = grid_points[1]-3; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] ); } } } j = grid_points[1]-2; #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * ( u[i][j-2][k][m] - 4.*u[i][j-1][k][m] + 5.*u[i][j][k][m] ); } } } /*-------------------------------------------------------------------- c compute zeta-direction fluxes c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { wijk = ws[i][j][k]; wp1 = ws[i][j][k+1]; wm1 = ws[i][j][k-1]; rhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 * 
(u[i][j][k+1][0] - 2.0*u[i][j][k][0] + u[i][j][k-1][0]) - tz2 * (u[i][j][k+1][3] - u[i][j][k-1][3]); rhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 * (u[i][j][k+1][1] - 2.0*u[i][j][k][1] + u[i][j][k-1][1]) + zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] + us[i][j][k-1]) - tz2 * (u[i][j][k+1][1]*wp1 - u[i][j][k-1][1]*wm1); rhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 * (u[i][j][k+1][2] - 2.0*u[i][j][k][2] + u[i][j][k-1][2]) + zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] + vs[i][j][k-1]) - tz2 * (u[i][j][k+1][2]*wp1 - u[i][j][k-1][2]*wm1); rhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 * (u[i][j][k+1][3] - 2.0*u[i][j][k][3] + u[i][j][k-1][3]) + zzcon2*con43 * (wp1 - 2.0*wijk + wm1) - tz2 * (u[i][j][k+1][3]*wp1 - u[i][j][k-1][3]*wm1 + (u[i][j][k+1][4] - square[i][j][k+1] - u[i][j][k-1][4] + square[i][j][k-1]) *c2); rhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 * (u[i][j][k+1][4] - 2.0*u[i][j][k][4] + u[i][j][k-1][4]) + zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] + qs[i][j][k-1]) + zzcon4 * (wp1*wp1 - 2.0*wijk*wijk + wm1*wm1) + zzcon5 * (u[i][j][k+1][4]*rho_i[i][j][k+1] - 2.0*u[i][j][k][4]*rho_i[i][j][k] + u[i][j][k-1][4]*rho_i[i][j][k-1]) - tz2 * ( (c1*u[i][j][k+1][4] - c2*square[i][j][k+1])*wp1 - (c1*u[i][j][k-1][4] - c2*square[i][j][k-1])*wm1); } } } /*-------------------------------------------------------------------- c add fourth order zeta-direction dissipation c-------------------------------------------------------------------*/ k = 1; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m]- dssp * ( 5.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + u[i][j][k+2][m]); } } } k = 2; #pragma omp for nowait for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (m = 0; m < 5; m++) { rhs[i][j][k][m] = rhs[i][j][k][m] - dssp * (-4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] + u[i][j][k+2][m]); } } } #pragma omp for nowait for (i = 1; i < 
grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 3; k < grid_points[2]-3; k++) {
	for (m = 0; m < 5; m++) {
	  rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
	    ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
	      6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +
	      u[i][j][k+2][m] );
	}
      }
    }
  }

  /* one-sided stencils at the last two interior k planes */
  k = grid_points[2]-3;
#pragma omp for nowait
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (m = 0; m < 5; m++) {
	rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
	  ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
	    6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] );
      }
    }
  }

  k = grid_points[2]-2;
#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (m = 0; m < 5; m++) {
	rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
	  ( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
	    5.0*u[i][j][k][m] );
      }
    }
  }

  /* finally scale the whole interior right-hand side by the time step */
#pragma omp for
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      for (m = 0; m < 5; m++) {
	for (i = 1; i < grid_points[0]-1; i++) {
	  rhs[i][j][k][m] = rhs[i][j][k][m] * dt;
	}
      }
    }
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void set_constants(void) {

/*--------------------------------------------------------------------
c Initialize all file-scope constants used by the benchmark: the
c coefficient table ce[][] for the exact solution, the gas constants,
c mesh spacings, dissipation coefficients and the various precomputed
c products (dt*..., c3c4*..., etc.).  Must be called after grid_points
c and dt are set.
c-------------------------------------------------------------------*/

  /* coefficients of the exact (manufactured) solution polynomial */
  ce[0][0]  = 2.0;
  ce[0][1]  = 0.0;
  ce[0][2]  = 0.0;
  ce[0][3]  = 4.0;
  ce[0][4]  = 5.0;
  ce[0][5]  = 3.0;
  ce[0][6]  = 0.5;
  ce[0][7]  = 0.02;
  ce[0][8]  = 0.01;
  ce[0][9]  = 0.03;
  ce[0][10] = 0.5;
  ce[0][11] = 0.4;
  ce[0][12] = 0.3;

  ce[1][0]  = 1.0;
  ce[1][1]  = 0.0;
  ce[1][2]  = 0.0;
  ce[1][3]  = 0.0;
  ce[1][4]  = 1.0;
  ce[1][5]  = 2.0;
  ce[1][6]  = 3.0;
  ce[1][7]  = 0.01;
  ce[1][8]  = 0.03;
  ce[1][9]  = 0.02;
  ce[1][10] = 0.4;
  ce[1][11] = 0.3;
  ce[1][12] = 0.5;

  ce[2][0]  = 2.0;
  ce[2][1]  = 2.0;
  ce[2][2]  = 0.0;
  ce[2][3]  = 0.0;
  ce[2][4]  = 0.0;
  ce[2][5]  = 2.0;
  ce[2][6]  = 3.0;
  ce[2][7]  = 0.04;
  ce[2][8]  = 0.03;
  ce[2][9]  = 0.05;
  ce[2][10] = 0.3;
  ce[2][11] = 0.5;
  ce[2][12] = 0.4;

  ce[3][0]  = 2.0;
  ce[3][1]  = 2.0;
  ce[3][2]  = 0.0;
  ce[3][3]  = 0.0;
  ce[3][4]  = 0.0;
  ce[3][5]  = 2.0;
  ce[3][6]  = 3.0;
  ce[3][7]  = 0.03;
  ce[3][8]  = 0.05;
  ce[3][9]  = 0.04;
  ce[3][10] = 0.2;
  ce[3][11] = 0.1;
  ce[3][12] = 0.3;

  ce[4][0]  = 5.0;
  ce[4][1]  = 4.0;
  ce[4][2]  = 3.0;
  ce[4][3]  = 2.0;
  ce[4][4]  = 0.1;
  ce[4][5]  = 0.4;
  ce[4][6]  = 0.3;
  ce[4][7]  = 0.05;
  ce[4][8]  = 0.04;
  ce[4][9]  = 0.03;
  ce[4][10] = 0.1;
  ce[4][11] = 0.3;
  ce[4][12] = 0.2;

  /* gas constants (c1 = gamma, c2 = gamma-1, ...) */
  c1 = 1.4;
  c2 = 0.4;
  c3 = 0.1;
  c4 = 1.0;
  c5 = 1.4;

  /* mesh spacing in each direction (unit cube) */
  dnxm1 = 1.0 / (double)(grid_points[0]-1);
  dnym1 = 1.0 / (double)(grid_points[1]-1);
  dnzm1 = 1.0 / (double)(grid_points[2]-1);

  c1c2 = c1 * c2;
  c1c5 = c1 * c5;
  c3c4 = c3 * c4;
  c1345 = c1c5 * c3c4;

  conz1 = (1.0-c1c5);

  /* 1/h^2, 1/(2h) and 1/h factors for each direction */
  tx1 = 1.0 / (dnxm1 * dnxm1);
  tx2 = 1.0 / (2.0 * dnxm1);
  tx3 = 1.0 / dnxm1;

  ty1 = 1.0 / (dnym1 * dnym1);
  ty2 = 1.0 / (2.0 * dnym1);
  ty3 = 1.0 / dnym1;

  tz1 = 1.0 / (dnzm1 * dnzm1);
  tz2 = 1.0 / (2.0 * dnzm1);
  tz3 = 1.0 / dnzm1;

  /* diffusion coefficients per equation and direction */
  dx1 = 0.75;
  dx2 = 0.75;
  dx3 = 0.75;
  dx4 = 0.75;
  dx5 = 0.75;

  dy1 = 0.75;
  dy2 = 0.75;
  dy3 = 0.75;
  dy4 = 0.75;
  dy5 = 0.75;

  dz1 = 1.0;
  dz2 = 1.0;
  dz3 = 1.0;
  dz4 = 1.0;
  dz5 = 1.0;

  dxmax = max(dx3, dx4);
  dymax = max(dy2, dy4);
  dzmax = max(dz2, dz3);

  /* fourth-order artificial dissipation coefficient */
  dssp = 0.25 * max(dx1, max(dy1, dz1) );

  c4dssp = 4.0 * dssp;
  c5dssp = 5.0 * dssp;

  dttx1 = dt*tx1;
  dttx2 = dt*tx2;
  dtty1 = dt*ty1;
  dtty2 = dt*ty2;
  dttz1 = dt*tz1;
  dttz2 = dt*tz2;

  c2dttx1 = 2.0*dttx1;
  c2dtty1 = 2.0*dtty1;
  c2dttz1 = 2.0*dttz1;

  dtdssp = dt*dssp;

  comz1 = dtdssp;
  comz4 = 4.0*dtdssp;
  comz5 = 5.0*dtdssp;
  comz6 = 6.0*dtdssp;

  c3c4tx3 = c3c4*tx3;
  c3c4ty3 = c3c4*ty3;
  c3c4tz3 = c3c4*tz3;

  dx1tx1 = dx1*tx1;
  dx2tx1 = dx2*tx1;
  dx3tx1 = dx3*tx1;
  dx4tx1 = dx4*tx1;
  dx5tx1 = dx5*tx1;

  dy1ty1 = dy1*ty1;
  dy2ty1 = dy2*ty1;
  dy3ty1 = dy3*ty1;
  dy4ty1 = dy4*ty1;
  dy5ty1 = dy5*ty1;

  dz1tz1 = dz1*tz1;
  dz2tz1 = dz2*tz1;
  dz3tz1 = dz3*tz1;
  dz4tz1 = dz4*tz1;
  dz5tz1 = dz5*tz1;

  c2iv  = 2.5;
  con43 = 4.0/3.0;
  con16 = 1.0/6.0;

  /* xi-direction viscous coefficients */
  xxcon1 = c3c4tx3*con43*tx3;
  xxcon2 = c3c4tx3*tx3;
  xxcon3 = c3c4tx3*conz1*tx3;
  xxcon4 = c3c4tx3*con16*tx3;
  xxcon5 =
c3c4tx3*c1c5*tx3;

  /* eta-direction viscous coefficients */
  yycon1 = c3c4ty3*con43*ty3;
  yycon2 = c3c4ty3*ty3;
  yycon3 = c3c4ty3*conz1*ty3;
  yycon4 = c3c4ty3*con16*ty3;
  yycon5 = c3c4ty3*c1c5*ty3;

  /* zeta-direction viscous coefficients */
  zzcon1 = c3c4tz3*con43*tz3;
  zzcon2 = c3c4tz3*tz3;
  zzcon3 = c3c4tz3*conz1*tz3;
  zzcon4 = c3c4tz3*con16*tz3;
  zzcon5 = c3c4tz3*c1c5*tz3;
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void verify(int no_time_steps, char *class, boolean *verified) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c  verification routine: compare the computed residual and error norms
c  against stored reference values for the known problem classes
c  (S, W, A, B, C).  Sets *class to the matched class (or 'U' for
c  unknown) and *verified to TRUE only when every norm is within
c  epsilon of its reference value.
c-------------------------------------------------------------------*/

  double xcrref[5],xceref[5],xcrdif[5],xcedif[5],
    epsilon, xce[5], xcr[5], dtref;
  int m;

/*--------------------------------------------------------------------
c   tolerance level
c-------------------------------------------------------------------*/
  epsilon = 1.0e-08;

/*--------------------------------------------------------------------
c   compute the error norm and the residual norm, and exit if not printing
c-------------------------------------------------------------------*/
  error_norm(xce);
  compute_rhs();

  rhs_norm(xcr);

  /* compute_rhs leaves rhs pre-multiplied by dt; undo that for the norm */
  for (m = 0; m < 5; m++) {
    xcr[m] = xcr[m] / dt;
  }

  *class = 'U';
  *verified = TRUE;

  for (m = 0; m < 5; m++) {
    xcrref[m] = 1.0;
    xceref[m] = 1.0;
  }

/*--------------------------------------------------------------------
c    reference data for 12X12X12 grids after 60 time steps, with DT = 1.0d-02
c    NOTE(review): the original comment said "100 time steps" but the code
c    (and the class S problem definition) uses 60.
c-------------------------------------------------------------------*/
  if (grid_points[0] == 12 &&
      grid_points[1] == 12 &&
      grid_points[2] == 12 &&
      no_time_steps == 60) {

    *class = 'S';
    dtref = 1.0e-2;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 1.7034283709541311e-01;
    xcrref[1] = 1.2975252070034097e-02;
    xcrref[2] = 3.2527926989486055e-02;
    xcrref[3] = 2.6436421275166801e-02;
    xcrref[4] = 1.9211784131744430e-01;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 4.9976913345811579e-04;
    xceref[1] = 4.5195666782961927e-05;
    xceref[2] = 7.3973765172921357e-05;
    xceref[3] = 7.3821238632439731e-05;
    xceref[4] = 8.9269630987491446e-04;

/*--------------------------------------------------------------------
c    reference data for 24X24X24 grids after 200 time steps, with DT = 0.8d-3
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 24 &&
	     grid_points[1] == 24 &&
	     grid_points[2] == 24 &&
	     no_time_steps == 200) {

    *class = 'W';
    dtref = 0.8e-3;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 0.1125590409344e+03;
    xcrref[1] = 0.1180007595731e+02;
    xcrref[2] = 0.2710329767846e+02;
    xcrref[3] = 0.2469174937669e+02;
    xcrref[4] = 0.2638427874317e+03;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 0.4419655736008e+01;
    xceref[1] = 0.4638531260002e+00;
    xceref[2] = 0.1011551749967e+01;
    xceref[3] = 0.9235878729944e+00;
    xceref[4] = 0.1018045837718e+02;

/*--------------------------------------------------------------------
c    reference data for 64X64X64 grids after 200 time steps, with DT = 0.8d-3
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 64 &&
	     grid_points[1] == 64 &&
	     grid_points[2] == 64 &&
	     no_time_steps == 200) {

    *class = 'A';
    dtref = 0.8e-3;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 1.0806346714637264e+02;
    xcrref[1] = 1.1319730901220813e+01;
    xcrref[2] = 2.5974354511582465e+01;
    xcrref[3] = 2.3665622544678910e+01;
    xcrref[4] = 2.5278963211748344e+02;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 4.2348416040525025e+00;
    xceref[1] = 4.4390282496995698e-01;
    xceref[2] = 9.6692480136345650e-01;
    xceref[3] = 8.8302063039765474e-01;
    xceref[4] = 9.7379901770829278e+00;

/*--------------------------------------------------------------------
c    reference data for 102X102X102 grids after 200 time steps,
c    with DT = 3.0d-04
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 102 &&
	     grid_points[1] == 102 &&
	     grid_points[2] == 102 &&
	     no_time_steps == 200) {

    *class = 'B';
    dtref = 3.0e-4;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 1.4233597229287254e+03;
    xcrref[1] = 9.9330522590150238e+01;
    xcrref[2] = 3.5646025644535285e+02;
    xcrref[3] = 3.2485447959084092e+02;
    xcrref[4] = 3.2707541254659363e+03;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 5.2969847140936856e+01;
    xceref[1] = 4.4632896115670668e+00;
    xceref[2] = 1.3122573342210174e+01;
    xceref[3] = 1.2006925323559144e+01;
    xceref[4] = 1.2459576151035986e+02;

/*--------------------------------------------------------------------
c    reference data for 162X162X162 grids after 200 time steps,
c    with DT = 1.0d-04
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 162 &&
	     grid_points[1] == 162 &&
	     grid_points[2] == 162 &&
	     no_time_steps == 200) {

    *class = 'C';
    dtref = 1.0e-4;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 0.62398116551764615e+04;
    xcrref[1] = 0.50793239190423964e+03;
    xcrref[2] = 0.15423530093013596e+04;
    xcrref[3] = 0.13302387929291190e+04;
    xcrref[4] = 0.11604087428436455e+05;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 0.16462008369091265e+03;
    xceref[1] = 0.11497107903824313e+02;
    xceref[2] = 0.41207446207461508e+02;
    xceref[3] = 0.37087651059694167e+02;
    xceref[4] = 0.36211053051841265e+03;

  } else {
    *verified = FALSE;
  }

/*--------------------------------------------------------------------
c    verification test for residuals if gridsize is either 12X12X12 or
c    64X64X64 or 102X102X102 or 162X162X162
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c    Compute the difference of solution values and the known reference values.
c-------------------------------------------------------------------*/
  for (m = 0; m < 5; m++) {
    xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);
    xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
  }

/*--------------------------------------------------------------------
c    Output the comparison of computed results to known cases.
c-------------------------------------------------------------------*/
  if (*class != 'U') {
    printf(" Verification being performed for class %1c\n", *class);
    printf(" accuracy setting for epsilon = %20.13e\n", epsilon);
    /* a DT mismatch downgrades the run to "unknown class" */
    if (fabs(dt-dtref) > epsilon) {
      *verified = FALSE;
      *class = 'U';
      printf(" DT does not match the reference value of %15.8e\n", dtref);
    }
  } else {
    printf(" Unknown class\n");
  }

  if (*class != 'U') {
    printf(" Comparison of RMS-norms of residual\n");
  } else {
    printf(" RMS-norms of residual\n");
  }

  for (m = 0; m < 5; m++) {
    if (*class == 'U') {
      printf("          %2d%20.13e\n", m, xcr[m]);
    } else if (xcrdif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
	     m, xcr[m], xcrref[m], xcrdif[m]);
    } else {
      printf("          %2d%20.13e%20.13e%20.13e\n",
	     m, xcr[m], xcrref[m], xcrdif[m]);
    }
  }

  if (*class != 'U') {
    printf(" Comparison of RMS-norms of solution error\n");
  } else {
    printf(" RMS-norms of solution error\n");
  }

  for (m = 0; m < 5; m++) {
    if (*class == 'U') {
      printf("          %2d%20.13e\n", m, xce[m]);
    } else if (xcedif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
	     m, xce[m], xceref[m], xcedif[m]);
    } else {
      printf("          %2d%20.13e%20.13e%20.13e\n",
	     m, xce[m], xceref[m], xcedif[m]);
    }
  }

  if (*class == 'U') {
    printf(" No reference values provided\n");
    printf(" No verification performed\n");
  } else if (*verified == TRUE) {
    printf(" Verification Successful\n");
  } else {
    printf(" Verification failed\n");
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void x_solve(void) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c
c     Performs line solves in X direction by first factoring
c     the block-tridiagonal matrix into an upper triangular matrix,
c     and then performing back
substitution to solve for the unknow c vectors of each line. c c Make sure we treat elements zero to cell_size in the direction c of the sweep. c c-------------------------------------------------------------------*/ lhsx(); x_solve_cell(); x_backsubstitute(); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void x_backsubstitute(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c back solve: if last cell, then generate U(isize)=rhs[isize) c else assume U(isize) is loaded in un pack backsub_info c so just use it c after call u(istart) will be sent to next cell c-------------------------------------------------------------------*/ int i, j, k, m, n; for (i = grid_points[0]-2; i >= 0; i--) { #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < BLOCK_SIZE; m++) { for (n = 0; n < BLOCK_SIZE; n++) { rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][CC][m][n]*rhs[i+1][j][k][n]; } } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void x_solve_cell(void) { /*-------------------------------------------------------------------- c performs guaussian elimination on this cell. c c assumes that unpacking routines for non-first cells c preload C' and rhs' from previous cell. 
c c assumed send happens outside this routine, but that c c'(IMAX) and rhs'(IMAX) will be sent to next cell c-------------------------------------------------------------------*/ int i,j,k,isize; isize = grid_points[0]-1; /*-------------------------------------------------------------------- c outer most do loops - sweeping in i direction c-------------------------------------------------------------------*/ #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c multiply c(0,j,k) by b_inverse and copy back to c c multiply rhs(0) by b_inverse(0) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[0][j][k][BB], lhs[0][j][k][CC], rhs[0][j][k] ); } } /*-------------------------------------------------------------------- c begin inner most do loop c do all the elements of the cell unless last c-------------------------------------------------------------------*/ for (i = 1; i < isize; i++) { #pragma omp for for (j = 1; j < grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c rhs(i) = rhs(i) - A*rhs(i-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][j][k][AA], rhs[i-1][j][k], rhs[i][j][k]); /*-------------------------------------------------------------------- c B(i) = B(i) - C(i-1)*A(i) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][j][k][AA], lhs[i-1][j][k][CC], lhs[i][j][k][BB]); /*-------------------------------------------------------------------- c multiply c(i,j,k) by b_inverse and copy back to c c multiply rhs(1,j,k) by b_inverse(1,j,k) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[i][j][k][BB], lhs[i][j][k][CC], rhs[i][j][k] ); } } } #pragma omp for for (j = 1; j < 
grid_points[1]-1; j++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c rhs(isize) = rhs(isize) - A*rhs(isize-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[isize][j][k][AA], rhs[isize-1][j][k], rhs[isize][j][k]); /*-------------------------------------------------------------------- c B(isize) = B(isize) - C(isize-1)*A(isize) c-------------------------------------------------------------------*/ matmul_sub(lhs[isize][j][k][AA], lhs[isize-1][j][k][CC], lhs[isize][j][k][BB]); /*-------------------------------------------------------------------- c multiply rhs() by b_inverse() and copy to rhs c-------------------------------------------------------------------*/ binvrhs( lhs[i][j][k][BB], rhs[i][j][k] ); } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c subtracts bvec=bvec - ablock*avec c-------------------------------------------------------------------*/ int i; for (i = 0; i < 5; i++) { /*-------------------------------------------------------------------- c rhs(i,ic,jc,kc,ccell) = rhs(i,ic,jc,kc,ccell) c $ - lhs[i,1,ablock,ia,ja,ka,acell)* c-------------------------------------------------------------------*/ bvec[i] = bvec[i] - ablock[i][0]*avec[0] - ablock[i][1]*avec[1] - ablock[i][2]*avec[2] - ablock[i][3]*avec[3] - ablock[i][4]*avec[4]; } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void matmul_sub(double ablock[5][5], double bblock[5][5], double cblock[5][5]) { 
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     subtracts a(i,j,k) X b(i,j,k) from c(i,j,k): 5x5 block
c     matrix-matrix multiply-subtract, rows fully unrolled, one
c     column of the product per loop iteration
c-------------------------------------------------------------------*/

  int j;

  for (j = 0; j < 5; j++) {
    cblock[0][j] = cblock[0][j] - ablock[0][0]*bblock[0][j]
      - ablock[0][1]*bblock[1][j]
      - ablock[0][2]*bblock[2][j]
      - ablock[0][3]*bblock[3][j]
      - ablock[0][4]*bblock[4][j];
    cblock[1][j] = cblock[1][j] - ablock[1][0]*bblock[0][j]
      - ablock[1][1]*bblock[1][j]
      - ablock[1][2]*bblock[2][j]
      - ablock[1][3]*bblock[3][j]
      - ablock[1][4]*bblock[4][j];
    cblock[2][j] = cblock[2][j] - ablock[2][0]*bblock[0][j]
      - ablock[2][1]*bblock[1][j]
      - ablock[2][2]*bblock[2][j]
      - ablock[2][3]*bblock[3][j]
      - ablock[2][4]*bblock[4][j];
    cblock[3][j] = cblock[3][j] - ablock[3][0]*bblock[0][j]
      - ablock[3][1]*bblock[1][j]
      - ablock[3][2]*bblock[2][j]
      - ablock[3][3]*bblock[3][j]
      - ablock[3][4]*bblock[4][j];
    cblock[4][j] = cblock[4][j] - ablock[4][0]*bblock[0][j]
      - ablock[4][1]*bblock[1][j]
      - ablock[4][2]*bblock[2][j]
      - ablock[4][3]*bblock[3][j]
      - ablock[4][4]*bblock[4][j];
  }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]) {

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c     in-place Gauss-Jordan elimination on the 5x5 block lhs (no
c     pivot search), applying the same row operations to the coupling
c     block c and the right-hand-side vector r
c-------------------------------------------------------------------*/

  double pivot, coeff;

  /* scale row 0 by 1/lhs[0][0], then eliminate column 0 from rows 1..4 */
  pivot = 1.00/lhs[0][0];
  lhs[0][1] = lhs[0][1]*pivot;
  lhs[0][2] = lhs[0][2]*pivot;
  lhs[0][3] = lhs[0][3]*pivot;
  lhs[0][4] = lhs[0][4]*pivot;
  c[0][0] = c[0][0]*pivot;
  c[0][1] = c[0][1]*pivot;
  c[0][2] = c[0][2]*pivot;
  c[0][3] = c[0][3]*pivot;
  c[0][4] = c[0][4]*pivot;
r[0] = r[0] *pivot; coeff = lhs[1][0]; lhs[1][1]= lhs[1][1] - coeff*lhs[0][1]; lhs[1][2]= lhs[1][2] - coeff*lhs[0][2]; lhs[1][3]= lhs[1][3] - coeff*lhs[0][3]; lhs[1][4]= lhs[1][4] - coeff*lhs[0][4]; c[1][0] = c[1][0] - coeff*c[0][0]; c[1][1] = c[1][1] - coeff*c[0][1]; c[1][2] = c[1][2] - coeff*c[0][2]; c[1][3] = c[1][3] - coeff*c[0][3]; c[1][4] = c[1][4] - coeff*c[0][4]; r[1] = r[1] - coeff*r[0]; coeff = lhs[2][0]; lhs[2][1]= lhs[2][1] - coeff*lhs[0][1]; lhs[2][2]= lhs[2][2] - coeff*lhs[0][2]; lhs[2][3]= lhs[2][3] - coeff*lhs[0][3]; lhs[2][4]= lhs[2][4] - coeff*lhs[0][4]; c[2][0] = c[2][0] - coeff*c[0][0]; c[2][1] = c[2][1] - coeff*c[0][1]; c[2][2] = c[2][2] - coeff*c[0][2]; c[2][3] = c[2][3] - coeff*c[0][3]; c[2][4] = c[2][4] - coeff*c[0][4]; r[2] = r[2] - coeff*r[0]; coeff = lhs[3][0]; lhs[3][1]= lhs[3][1] - coeff*lhs[0][1]; lhs[3][2]= lhs[3][2] - coeff*lhs[0][2]; lhs[3][3]= lhs[3][3] - coeff*lhs[0][3]; lhs[3][4]= lhs[3][4] - coeff*lhs[0][4]; c[3][0] = c[3][0] - coeff*c[0][0]; c[3][1] = c[3][1] - coeff*c[0][1]; c[3][2] = c[3][2] - coeff*c[0][2]; c[3][3] = c[3][3] - coeff*c[0][3]; c[3][4] = c[3][4] - coeff*c[0][4]; r[3] = r[3] - coeff*r[0]; coeff = lhs[4][0]; lhs[4][1]= lhs[4][1] - coeff*lhs[0][1]; lhs[4][2]= lhs[4][2] - coeff*lhs[0][2]; lhs[4][3]= lhs[4][3] - coeff*lhs[0][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[0][4]; c[4][0] = c[4][0] - coeff*c[0][0]; c[4][1] = c[4][1] - coeff*c[0][1]; c[4][2] = c[4][2] - coeff*c[0][2]; c[4][3] = c[4][3] - coeff*c[0][3]; c[4][4] = c[4][4] - coeff*c[0][4]; r[4] = r[4] - coeff*r[0]; pivot = 1.00/lhs[1][1]; lhs[1][2] = lhs[1][2]*pivot; lhs[1][3] = lhs[1][3]*pivot; lhs[1][4] = lhs[1][4]*pivot; c[1][0] = c[1][0]*pivot; c[1][1] = c[1][1]*pivot; c[1][2] = c[1][2]*pivot; c[1][3] = c[1][3]*pivot; c[1][4] = c[1][4]*pivot; r[1] = r[1] *pivot; coeff = lhs[0][1]; lhs[0][2]= lhs[0][2] - coeff*lhs[1][2]; lhs[0][3]= lhs[0][3] - coeff*lhs[1][3]; lhs[0][4]= lhs[0][4] - coeff*lhs[1][4]; c[0][0] = c[0][0] - coeff*c[1][0]; c[0][1] = c[0][1] - 
coeff*c[1][1]; c[0][2] = c[0][2] - coeff*c[1][2]; c[0][3] = c[0][3] - coeff*c[1][3]; c[0][4] = c[0][4] - coeff*c[1][4]; r[0] = r[0] - coeff*r[1]; coeff = lhs[2][1]; lhs[2][2]= lhs[2][2] - coeff*lhs[1][2]; lhs[2][3]= lhs[2][3] - coeff*lhs[1][3]; lhs[2][4]= lhs[2][4] - coeff*lhs[1][4]; c[2][0] = c[2][0] - coeff*c[1][0]; c[2][1] = c[2][1] - coeff*c[1][1]; c[2][2] = c[2][2] - coeff*c[1][2]; c[2][3] = c[2][3] - coeff*c[1][3]; c[2][4] = c[2][4] - coeff*c[1][4]; r[2] = r[2] - coeff*r[1]; coeff = lhs[3][1]; lhs[3][2]= lhs[3][2] - coeff*lhs[1][2]; lhs[3][3]= lhs[3][3] - coeff*lhs[1][3]; lhs[3][4]= lhs[3][4] - coeff*lhs[1][4]; c[3][0] = c[3][0] - coeff*c[1][0]; c[3][1] = c[3][1] - coeff*c[1][1]; c[3][2] = c[3][2] - coeff*c[1][2]; c[3][3] = c[3][3] - coeff*c[1][3]; c[3][4] = c[3][4] - coeff*c[1][4]; r[3] = r[3] - coeff*r[1]; coeff = lhs[4][1]; lhs[4][2]= lhs[4][2] - coeff*lhs[1][2]; lhs[4][3]= lhs[4][3] - coeff*lhs[1][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[1][4]; c[4][0] = c[4][0] - coeff*c[1][0]; c[4][1] = c[4][1] - coeff*c[1][1]; c[4][2] = c[4][2] - coeff*c[1][2]; c[4][3] = c[4][3] - coeff*c[1][3]; c[4][4] = c[4][4] - coeff*c[1][4]; r[4] = r[4] - coeff*r[1]; pivot = 1.00/lhs[2][2]; lhs[2][3] = lhs[2][3]*pivot; lhs[2][4] = lhs[2][4]*pivot; c[2][0] = c[2][0]*pivot; c[2][1] = c[2][1]*pivot; c[2][2] = c[2][2]*pivot; c[2][3] = c[2][3]*pivot; c[2][4] = c[2][4]*pivot; r[2] = r[2] *pivot; coeff = lhs[0][2]; lhs[0][3]= lhs[0][3] - coeff*lhs[2][3]; lhs[0][4]= lhs[0][4] - coeff*lhs[2][4]; c[0][0] = c[0][0] - coeff*c[2][0]; c[0][1] = c[0][1] - coeff*c[2][1]; c[0][2] = c[0][2] - coeff*c[2][2]; c[0][3] = c[0][3] - coeff*c[2][3]; c[0][4] = c[0][4] - coeff*c[2][4]; r[0] = r[0] - coeff*r[2]; coeff = lhs[1][2]; lhs[1][3]= lhs[1][3] - coeff*lhs[2][3]; lhs[1][4]= lhs[1][4] - coeff*lhs[2][4]; c[1][0] = c[1][0] - coeff*c[2][0]; c[1][1] = c[1][1] - coeff*c[2][1]; c[1][2] = c[1][2] - coeff*c[2][2]; c[1][3] = c[1][3] - coeff*c[2][3]; c[1][4] = c[1][4] - coeff*c[2][4]; r[1] = r[1] - coeff*r[2]; coeff 
= lhs[3][2]; lhs[3][3]= lhs[3][3] - coeff*lhs[2][3]; lhs[3][4]= lhs[3][4] - coeff*lhs[2][4]; c[3][0] = c[3][0] - coeff*c[2][0]; c[3][1] = c[3][1] - coeff*c[2][1]; c[3][2] = c[3][2] - coeff*c[2][2]; c[3][3] = c[3][3] - coeff*c[2][3]; c[3][4] = c[3][4] - coeff*c[2][4]; r[3] = r[3] - coeff*r[2]; coeff = lhs[4][2]; lhs[4][3]= lhs[4][3] - coeff*lhs[2][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[2][4]; c[4][0] = c[4][0] - coeff*c[2][0]; c[4][1] = c[4][1] - coeff*c[2][1]; c[4][2] = c[4][2] - coeff*c[2][2]; c[4][3] = c[4][3] - coeff*c[2][3]; c[4][4] = c[4][4] - coeff*c[2][4]; r[4] = r[4] - coeff*r[2]; pivot = 1.00/lhs[3][3]; lhs[3][4] = lhs[3][4]*pivot; c[3][0] = c[3][0]*pivot; c[3][1] = c[3][1]*pivot; c[3][2] = c[3][2]*pivot; c[3][3] = c[3][3]*pivot; c[3][4] = c[3][4]*pivot; r[3] = r[3] *pivot; coeff = lhs[0][3]; lhs[0][4]= lhs[0][4] - coeff*lhs[3][4]; c[0][0] = c[0][0] - coeff*c[3][0]; c[0][1] = c[0][1] - coeff*c[3][1]; c[0][2] = c[0][2] - coeff*c[3][2]; c[0][3] = c[0][3] - coeff*c[3][3]; c[0][4] = c[0][4] - coeff*c[3][4]; r[0] = r[0] - coeff*r[3]; coeff = lhs[1][3]; lhs[1][4]= lhs[1][4] - coeff*lhs[3][4]; c[1][0] = c[1][0] - coeff*c[3][0]; c[1][1] = c[1][1] - coeff*c[3][1]; c[1][2] = c[1][2] - coeff*c[3][2]; c[1][3] = c[1][3] - coeff*c[3][3]; c[1][4] = c[1][4] - coeff*c[3][4]; r[1] = r[1] - coeff*r[3]; coeff = lhs[2][3]; lhs[2][4]= lhs[2][4] - coeff*lhs[3][4]; c[2][0] = c[2][0] - coeff*c[3][0]; c[2][1] = c[2][1] - coeff*c[3][1]; c[2][2] = c[2][2] - coeff*c[3][2]; c[2][3] = c[2][3] - coeff*c[3][3]; c[2][4] = c[2][4] - coeff*c[3][4]; r[2] = r[2] - coeff*r[3]; coeff = lhs[4][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[3][4]; c[4][0] = c[4][0] - coeff*c[3][0]; c[4][1] = c[4][1] - coeff*c[3][1]; c[4][2] = c[4][2] - coeff*c[3][2]; c[4][3] = c[4][3] - coeff*c[3][3]; c[4][4] = c[4][4] - coeff*c[3][4]; r[4] = r[4] - coeff*r[3]; pivot = 1.00/lhs[4][4]; c[4][0] = c[4][0]*pivot; c[4][1] = c[4][1]*pivot; c[4][2] = c[4][2]*pivot; c[4][3] = c[4][3]*pivot; c[4][4] = c[4][4]*pivot; r[4] = r[4] 
*pivot; coeff = lhs[0][4]; c[0][0] = c[0][0] - coeff*c[4][0]; c[0][1] = c[0][1] - coeff*c[4][1]; c[0][2] = c[0][2] - coeff*c[4][2]; c[0][3] = c[0][3] - coeff*c[4][3]; c[0][4] = c[0][4] - coeff*c[4][4]; r[0] = r[0] - coeff*r[4]; coeff = lhs[1][4]; c[1][0] = c[1][0] - coeff*c[4][0]; c[1][1] = c[1][1] - coeff*c[4][1]; c[1][2] = c[1][2] - coeff*c[4][2]; c[1][3] = c[1][3] - coeff*c[4][3]; c[1][4] = c[1][4] - coeff*c[4][4]; r[1] = r[1] - coeff*r[4]; coeff = lhs[2][4]; c[2][0] = c[2][0] - coeff*c[4][0]; c[2][1] = c[2][1] - coeff*c[4][1]; c[2][2] = c[2][2] - coeff*c[4][2]; c[2][3] = c[2][3] - coeff*c[4][3]; c[2][4] = c[2][4] - coeff*c[4][4]; r[2] = r[2] - coeff*r[4]; coeff = lhs[3][4]; c[3][0] = c[3][0] - coeff*c[4][0]; c[3][1] = c[3][1] - coeff*c[4][1]; c[3][2] = c[3][2] - coeff*c[4][2]; c[3][3] = c[3][3] - coeff*c[4][3]; c[3][4] = c[3][4] - coeff*c[4][4]; r[3] = r[3] - coeff*r[4]; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void binvrhs( double lhs[5][5], double r[5] ) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ double pivot, coeff; /*-------------------------------------------------------------------- c c-------------------------------------------------------------------*/ pivot = 1.00/lhs[0][0]; lhs[0][1] = lhs[0][1]*pivot; lhs[0][2] = lhs[0][2]*pivot; lhs[0][3] = lhs[0][3]*pivot; lhs[0][4] = lhs[0][4]*pivot; r[0] = r[0] *pivot; coeff = lhs[1][0]; lhs[1][1]= lhs[1][1] - coeff*lhs[0][1]; lhs[1][2]= lhs[1][2] - coeff*lhs[0][2]; lhs[1][3]= lhs[1][3] - coeff*lhs[0][3]; lhs[1][4]= lhs[1][4] - coeff*lhs[0][4]; r[1] = r[1] - coeff*r[0]; coeff = lhs[2][0]; lhs[2][1]= lhs[2][1] - coeff*lhs[0][1]; lhs[2][2]= lhs[2][2] - coeff*lhs[0][2]; lhs[2][3]= lhs[2][3] - coeff*lhs[0][3]; lhs[2][4]= lhs[2][4] - coeff*lhs[0][4]; r[2] = r[2] - coeff*r[0]; coeff = lhs[3][0]; lhs[3][1]= 
lhs[3][1] - coeff*lhs[0][1]; lhs[3][2]= lhs[3][2] - coeff*lhs[0][2]; lhs[3][3]= lhs[3][3] - coeff*lhs[0][3]; lhs[3][4]= lhs[3][4] - coeff*lhs[0][4]; r[3] = r[3] - coeff*r[0]; coeff = lhs[4][0]; lhs[4][1]= lhs[4][1] - coeff*lhs[0][1]; lhs[4][2]= lhs[4][2] - coeff*lhs[0][2]; lhs[4][3]= lhs[4][3] - coeff*lhs[0][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[0][4]; r[4] = r[4] - coeff*r[0]; pivot = 1.00/lhs[1][1]; lhs[1][2] = lhs[1][2]*pivot; lhs[1][3] = lhs[1][3]*pivot; lhs[1][4] = lhs[1][4]*pivot; r[1] = r[1] *pivot; coeff = lhs[0][1]; lhs[0][2]= lhs[0][2] - coeff*lhs[1][2]; lhs[0][3]= lhs[0][3] - coeff*lhs[1][3]; lhs[0][4]= lhs[0][4] - coeff*lhs[1][4]; r[0] = r[0] - coeff*r[1]; coeff = lhs[2][1]; lhs[2][2]= lhs[2][2] - coeff*lhs[1][2]; lhs[2][3]= lhs[2][3] - coeff*lhs[1][3]; lhs[2][4]= lhs[2][4] - coeff*lhs[1][4]; r[2] = r[2] - coeff*r[1]; coeff = lhs[3][1]; lhs[3][2]= lhs[3][2] - coeff*lhs[1][2]; lhs[3][3]= lhs[3][3] - coeff*lhs[1][3]; lhs[3][4]= lhs[3][4] - coeff*lhs[1][4]; r[3] = r[3] - coeff*r[1]; coeff = lhs[4][1]; lhs[4][2]= lhs[4][2] - coeff*lhs[1][2]; lhs[4][3]= lhs[4][3] - coeff*lhs[1][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[1][4]; r[4] = r[4] - coeff*r[1]; pivot = 1.00/lhs[2][2]; lhs[2][3] = lhs[2][3]*pivot; lhs[2][4] = lhs[2][4]*pivot; r[2] = r[2] *pivot; coeff = lhs[0][2]; lhs[0][3]= lhs[0][3] - coeff*lhs[2][3]; lhs[0][4]= lhs[0][4] - coeff*lhs[2][4]; r[0] = r[0] - coeff*r[2]; coeff = lhs[1][2]; lhs[1][3]= lhs[1][3] - coeff*lhs[2][3]; lhs[1][4]= lhs[1][4] - coeff*lhs[2][4]; r[1] = r[1] - coeff*r[2]; coeff = lhs[3][2]; lhs[3][3]= lhs[3][3] - coeff*lhs[2][3]; lhs[3][4]= lhs[3][4] - coeff*lhs[2][4]; r[3] = r[3] - coeff*r[2]; coeff = lhs[4][2]; lhs[4][3]= lhs[4][3] - coeff*lhs[2][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[2][4]; r[4] = r[4] - coeff*r[2]; pivot = 1.00/lhs[3][3]; lhs[3][4] = lhs[3][4]*pivot; r[3] = r[3] *pivot; coeff = lhs[0][3]; lhs[0][4]= lhs[0][4] - coeff*lhs[3][4]; r[0] = r[0] - coeff*r[3]; coeff = lhs[1][3]; lhs[1][4]= lhs[1][4] - coeff*lhs[3][4]; r[1] = 
r[1] - coeff*r[3]; coeff = lhs[2][3]; lhs[2][4]= lhs[2][4] - coeff*lhs[3][4]; r[2] = r[2] - coeff*r[3]; coeff = lhs[4][3]; lhs[4][4]= lhs[4][4] - coeff*lhs[3][4]; r[4] = r[4] - coeff*r[3]; pivot = 1.00/lhs[4][4]; r[4] = r[4] *pivot; coeff = lhs[0][4]; r[0] = r[0] - coeff*r[4]; coeff = lhs[1][4]; r[1] = r[1] - coeff*r[4]; coeff = lhs[2][4]; r[2] = r[2] - coeff*r[4]; coeff = lhs[3][4]; r[3] = r[3] - coeff*r[4]; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void y_solve(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Performs line solves in Y direction by first factoring c the block-tridiagonal matrix into an upper triangular matrix][ c and then performing back substitution to solve for the unknow c vectors of each line. c c Make sure we treat elements zero to cell_size in the direction c of the sweep. 
c-------------------------------------------------------------------*/ lhsy(); y_solve_cell(); y_backsubstitute(); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void y_backsubstitute(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c back solve: if last cell][ then generate U(jsize)=rhs(jsize) c else assume U(jsize) is loaded in un pack backsub_info c so just use it c after call u(jstart) will be sent to next cell c-------------------------------------------------------------------*/ int i, j, k, m, n; for (j = grid_points[1]-2; j >= 0; j--) { #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { for (m = 0; m < BLOCK_SIZE; m++) { for (n = 0; n < BLOCK_SIZE; n++) { rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][CC][m][n]*rhs[i][j+1][k][n]; } } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void y_solve_cell(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c performs guaussian elimination on this cell. c c assumes that unpacking routines for non-first cells c preload C' and rhs' from previous cell. 
c c assumed send happens outside this routine, but that c c'(JMAX) and rhs'(JMAX) will be sent to next cell c-------------------------------------------------------------------*/ int i, j, k, jsize; jsize = grid_points[1]-1; #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c multiply c(i,0,k) by b_inverse and copy back to c c multiply rhs(0) by b_inverse(0) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[i][0][k][BB], lhs[i][0][k][CC], rhs[i][0][k] ); } } /*-------------------------------------------------------------------- c begin inner most do loop c do all the elements of the cell unless last c-------------------------------------------------------------------*/ for (j = 1; j < jsize; j++) { #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c subtract A*lhs_vector(j-1) from lhs_vector(j) c c rhs(j) = rhs(j) - A*rhs(j-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][j][k][AA], rhs[i][j-1][k], rhs[i][j][k]); /*-------------------------------------------------------------------- c B(j) = B(j) - C(j-1)*A(j) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][j][k][AA], lhs[i][j-1][k][CC], lhs[i][j][k][BB]); /*-------------------------------------------------------------------- c multiply c(i,j,k) by b_inverse and copy back to c c multiply rhs(i,1,k) by b_inverse(i,1,k) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[i][j][k][BB], lhs[i][j][k][CC], rhs[i][j][k] ); } } } #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (k = 1; k < grid_points[2]-1; k++) { /*-------------------------------------------------------------------- c rhs(jsize) 
= rhs(jsize) - A*rhs(jsize-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][jsize][k][AA], rhs[i][jsize-1][k], rhs[i][jsize][k]); /*-------------------------------------------------------------------- c B(jsize) = B(jsize) - C(jsize-1)*A(jsize) c call matmul_sub(aa,i,jsize,k,c, c $ cc,i,jsize-1,k,c,BB,i,jsize,k) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][jsize][k][AA], lhs[i][jsize-1][k][CC], lhs[i][jsize][k][BB]); /*-------------------------------------------------------------------- c multiply rhs(jsize) by b_inverse(jsize) and copy to rhs c-------------------------------------------------------------------*/ binvrhs( lhs[i][jsize][k][BB], rhs[i][jsize][k] ); } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void z_solve(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Performs line solves in Z direction by first factoring c the block-tridiagonal matrix into an upper triangular matrix, c and then performing back substitution to solve for the unknow c vectors of each line. c c Make sure we treat elements zero to cell_size in the direction c of the sweep. 
c-------------------------------------------------------------------*/ lhsz(); z_solve_cell(); z_backsubstitute(); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void z_backsubstitute(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c back solve: if last cell, then generate U(ksize)=rhs(ksize) c else assume U(ksize) is loaded in un pack backsub_info c so just use it c after call u(kstart) will be sent to next cell c-------------------------------------------------------------------*/ int i, j, k, m, n; #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { for (k = grid_points[2]-2; k >= 0; k--) { for (m = 0; m < BLOCK_SIZE; m++) { for (n = 0; n < BLOCK_SIZE; n++) { rhs[i][j][k][m] = rhs[i][j][k][m] - lhs[i][j][k][CC][m][n]*rhs[i][j][k+1][n]; } } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void z_solve_cell(void) { /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c performs guaussian elimination on this cell. c c assumes that unpacking routines for non-first cells c preload C' and rhs' from previous cell. c c assumed send happens outside this routine, but that c c'(KMAX) and rhs'(KMAX) will be sent to next cell. 
c-------------------------------------------------------------------*/ int i,j,k,ksize; ksize = grid_points[2]-1; /*-------------------------------------------------------------------- c outer most do loops - sweeping in i direction c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { /*-------------------------------------------------------------------- c multiply c(i,j,0) by b_inverse and copy back to c c multiply rhs(0) by b_inverse(0) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[i][j][0][BB], lhs[i][j][0][CC], rhs[i][j][0] ); } } /*-------------------------------------------------------------------- c begin inner most do loop c do all the elements of the cell unless last c-------------------------------------------------------------------*/ for (k = 1; k < ksize; k++) { #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { /*-------------------------------------------------------------------- c subtract A*lhs_vector(k-1) from lhs_vector(k) c c rhs(k) = rhs(k) - A*rhs(k-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][j][k][AA], rhs[i][j][k-1], rhs[i][j][k]); /*-------------------------------------------------------------------- c B(k) = B(k) - C(k-1)*A(k) c call matmul_sub(aa,i,j,k,c,cc,i,j,k-1,c,BB,i,j,k) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][j][k][AA], lhs[i][j][k-1][CC], lhs[i][j][k][BB]); /*-------------------------------------------------------------------- c multiply c(i,j,k) by b_inverse and copy back to c c multiply rhs(i,j,1) by b_inverse(i,j,1) and copy to rhs c-------------------------------------------------------------------*/ binvcrhs( lhs[i][j][k][BB], lhs[i][j][k][CC], rhs[i][j][k] ); } } } 
/*-------------------------------------------------------------------- c Now finish up special cases for last cell c-------------------------------------------------------------------*/ #pragma omp for for (i = 1; i < grid_points[0]-1; i++) { for (j = 1; j < grid_points[1]-1; j++) { /*-------------------------------------------------------------------- c rhs(ksize) = rhs(ksize) - A*rhs(ksize-1) c-------------------------------------------------------------------*/ matvec_sub(lhs[i][j][ksize][AA], rhs[i][j][ksize-1], rhs[i][j][ksize]); /*-------------------------------------------------------------------- c B(ksize) = B(ksize) - C(ksize-1)*A(ksize) c call matmul_sub(aa,i,j,ksize,c, c $ cc,i,j,ksize-1,c,BB,i,j,ksize) c-------------------------------------------------------------------*/ matmul_sub(lhs[i][j][ksize][AA], lhs[i][j][ksize-1][CC], lhs[i][j][ksize][BB]); /*-------------------------------------------------------------------- c multiply rhs(ksize) by b_inverse(ksize) and copy to rhs c-------------------------------------------------------------------*/ binvrhs( lhs[i][j][ksize][BB], rhs[i][j][ksize] ); } } }
saxpy-omp3.c
/** * @file saxpy.c * * @brief saxpy performs the \c axpy computation in single-precision on both * host and accelerator. The performance (in MFLOPS) on host and accelerator is * compared and the numerical results are also verified for consistency. * * The \c axpy computation is defined as: * * y := a * x + y * * where: * * - a is a scalar. * - x and y are vectors each with n elements. * * Please note that in this version only <em>one GPU thread</em> is used. * * Offload to GPU: * * gcc -fopenmp -foffload=nvptx-none saxpy.c * */ #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> #include "utils.h" #define TWO02 (1 << 2) #define TWO04 (1 << 4) #define TWO08 (1 << 8) #ifndef N #define N (1 << 27) #endif int main(int argc, char *argv[]) { int i, n = N, iret = 0; float a = 101.0f / TWO02, b, c, *x, *y, *z; struct timespec rt[2]; double wt; // walltime if (argc > 1) n = atoi(argv[1]); /* * 0. prepare x, y, and z * * y := a * x + y (on host) * z := a * x + z (on accel) */ if (NULL == (x = (float *)malloc(sizeof(*x) * n))) { printf("error: memory allocation for 'x'\n"); iret = -1; } if (NULL == (y = (float *)malloc(sizeof(*y) * n))) { printf("error: memory allocation for 'y'\n"); iret = -1; } if (NULL == (z = (float *)malloc(sizeof(*z) * n))) { printf("error: memory allocation for 'z'\n"); iret = -1; } if (0 != iret) { free(x); free(y); free(z); exit(EXIT_FAILURE); } b = rand() % TWO04; c = rand() % TWO08; for (i = 0; i < n; i++) { x[i] = b / (float)TWO02; y[i] = z[i] = c / (float)TWO04; } /* * 1. saxpy on host */ clock_gettime(CLOCK_REALTIME, rt + 0); for (i = 0; i < n; i++) { y[i] = a * x[i] + y[i]; } clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("saxpy on host : %9.3f sec %9.1f MFLOPS\n", wt, 2.0 * n / (1.0e6 * wt)); /* * 2. 
saxpy on accel */ clock_gettime(CLOCK_REALTIME, rt + 0); #pragma omp target data map(to \ : a, n, x [0:n]) map(tofrom \ : z [0:n]) #pragma omp target teams num_teams(n / NTHREADS_GPU) thread_limit(NTHREADS_GPU) \ map(to \ : a, n, x [0:n]) map(tofrom \ : z [0:n]) #pragma omp distribute parallel for num_threads(NTHREADS_GPU) \ dist_schedule(static, NTHREADS_GPU) for (int i = 0; i < n; i++) { z[i] = a * x[i] + z[i]; } clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("saxpy on accel: %9.3f sec %9.1f MFLOPS\n", wt, 2.0 * n / (1.0e6 * wt)); /* * 3. verify numerical consistency */ for (i = 0; i < n; i++) { iret = *(int *)(y + i) ^ *(int *)(z + i); assert(iret == 0); } return 0; }
image-view.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                IIIII  M   M   AAA   GGGG  EEEEE                             %
%                  I    MM MM  A   A G      E                                 %
%                  I    M M M  AAAAA G  GG  EEE                               %
%                  I    M   M  A   A G   G  E                                 %
%                IIIII  M   M  A   A  GGGG  EEEEE                             %
%                                                                             %
%                      V   V  IIIII  EEEEE  W   W                             %
%                      V   V    I    E      W   W                             %
%                      V   V    I    EEE    W W W                             %
%                       V V     I    E      WW WW                             %
%                        V    IIIII  EEEEE  W   W                             %
%                                                                             %
%                                                                             %
%                    MagickCore Image View Methods                            %
%                                                                             %
%                              Software Design                                %
%                                John Cristy                                  %
%                                 March 2003                                  %
%                                                                             %
%                                                                             %
%  Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    http://www.imagemagick.org/script/license.php                            %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/
#include "magick/studio.h"
#include "magick/MagickCore.h"
#include "magick/exception-private.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"

/*
  Typedef declarations.

  An ImageView binds an Image, a CacheView onto its pixels, and a
  rectangular region of interest, so iterator methods can sweep the
  region scanline-by-scanline (optionally in parallel).
*/
struct _ImageView
{
  char
    *description;       /* label reported to progress monitors; may be NULL
                           (DestroyImageView guards for NULL) */

  RectangleInfo
    extent;             /* region of the image covered by this view */

  Image
    *image;             /* associated image; NOT owned by the view
                           (DestroyImageView does not destroy it) */

  CacheView
    *view;              /* pixel cache view used for all pixel access */

  size_t
    number_threads;     /* thread count passed to the OpenMP iterators
                           via num_threads() */

  ExceptionInfo
    *exception;         /* errors raised while operating on the view */

  MagickBooleanType
    debug;              /* debug flag copied by CloneImageView; usage not
                           visible in this section */

  size_t
    signature;          /* MagickSignature while live; inverted on destroy */
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e I m a g e V i e w                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageView() makes a copy of the specified image view.
%
%  The format of the CloneImageView method is:
%
%      ImageView *CloneImageView(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport ImageView *CloneImageView(const ImageView *image_view)
{
  ImageView
    *clone_view;

  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickSignature);
  clone_view=(ImageView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (ImageView *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  /*
    The description may be NULL (DestroyImageView tolerates that), so
    guard it before duplicating.
  */
  if (image_view->description != (char *) NULL)
    clone_view->description=ConstantString(image_view->description);
  clone_view->extent=image_view->extent;
  /*
    Shallow-copy the image pointer: the view does not own the image
    (DestroyImageView never destroys it).  Without this, accessors on
    the clone would return a NULL image.
  */
  clone_view->image=image_view->image;
  clone_view->view=CloneCacheView(image_view->view);
  clone_view->number_threads=image_view->number_threads;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,image_view->exception);
  clone_view->debug=image_view->debug;
  clone_view->signature=MagickSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e V i e w                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageView() deallocates memory associated with a image view.
%
%  The format of the DestroyImageView method is:
%
%      ImageView *DestroyImageView(ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport ImageView *DestroyImageView(ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickSignature);
  /* release owned members; the underlying Image is NOT owned here */
  if (image_view->description != (char *) NULL)
    image_view->description=DestroyString(image_view->description);
  image_view->view=DestroyCacheView(image_view->view);
  image_view->exception=DestroyExceptionInfo(image_view->exception);
  /* invalidate the signature so stale pointers trip the asserts above */
  image_view->signature=(~MagickSignature);
  return((ImageView *) RelinquishMagickMemory(image_view));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D u p l e x T r a n s f e r I m a g e V i e w I t e r a t o r             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DuplexTransferImageViewIterator() iterates over three image views in
%  parallel and calls your transfer method for each scanline of the view.  The
%  source and duplex pixel extent is not confined to the image canvas-- that is
%  you can include negative offsets or widths or heights that exceed the image
%  dimension.  However, the destination image view is confined to the image
%  canvas-- that is no negative offsets or widths or heights that exceed the
%  image dimension are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType DuplexTransferImageViewMethod(const ImageView *source,
%        const ImageView *duplex,ImageView *destination,const ssize_t y,
%        const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the DuplexTransferImageViewIterator method is:
%
%      MagickBooleanType DuplexTransferImageViewIterator(ImageView *source,
%        ImageView *duplex,ImageView *destination,
%        DuplexTransferImageViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source image view.
% % o duplex: the duplex image view. % % o destination: the destination image view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ MagickExport MagickBooleanType DuplexTransferImageViewIterator( ImageView *source,ImageView *duplex,ImageView *destination, DuplexTransferImageViewMethod transfer,void *context) { ExceptionInfo *exception; Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (ImageView *) NULL); assert(source->signature == MagickSignature); if (transfer == (DuplexTransferImageViewMethod) NULL) return(MagickFalse); source_image=source->image; destination_image=destination->image; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(source->number_threads) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const PixelPacket *restrict duplex_pixels, *restrict pixels; register PixelPacket *restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y, duplex->extent.width,1,duplex->exception); if (duplex_pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } if (transfer(source,duplex,destination,y,id,context) == MagickFalse) status=MagickFalse; 
sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_DuplexTransferImageViewIterator) #endif proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w A u t h e n t i c I n d e x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewAuthenticIndexes() returns the image view authentic indexes. % % The format of the GetImageViewAuthenticPixels method is: % % IndexPacket *GetImageViewAuthenticIndexes(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport IndexPacket *GetImageViewAuthenticIndexes( const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickSignature); return(GetCacheViewAuthenticIndexQueue(image_view->view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewAuthenticPixels() returns the image view authentic pixels. % % The format of the GetImageViewAuthenticPixels method is: % % PixelPacket *GetImageViewAuthenticPixels(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. 
% */ MagickExport PixelPacket *GetImageViewAuthenticPixels( const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickSignature); return(GetCacheViewAuthenticPixelQueue(image_view->view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewException() returns the severity, reason, and description of any % error that occurs when utilizing a image view. % % The format of the GetImageViewException method is: % % char *GetImageViewException(const PixelImage *image_view, % ExceptionType *severity) % % A description of each parameter follows: % % o image_view: the pixel image_view. % % o severity: the severity of the error is returned here. % */ MagickExport char *GetImageViewException(const ImageView *image_view, ExceptionType *severity) { char *description; assert(image_view != (const ImageView *) NULL); assert(image_view->signature == MagickSignature); assert(severity != (ExceptionType *) NULL); *severity=image_view->exception->severity; description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent, sizeof(*description)); if (description == (char *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); *description='\0'; if (image_view->exception->reason != (char *) NULL) (void) CopyMagickString(description,GetLocaleExceptionMessage( image_view->exception->severity,image_view->exception->reason), MaxTextExtent); if (image_view->exception->description != (char *) NULL) { (void) ConcatenateMagickString(description," (",MaxTextExtent); (void) ConcatenateMagickString(description,GetLocaleExceptionMessage( image_view->exception->severity,image_view->exception->description), MaxTextExtent); (void) ConcatenateMagickString(description,")",MaxTextExtent); } return(description); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewExtent() returns the image view extent. % % The format of the GetImageViewExtent method is: % % RectangleInfo GetImageViewExtent(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport RectangleInfo GetImageViewExtent(const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickSignature); return(image_view->extent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewImage() returns the image associated with the image view. % % The format of the GetImageViewImage method is: % % MagickCore *GetImageViewImage(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport Image *GetImageViewImage(const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickSignature); return(image_view->image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewIterator() iterates over the image view in parallel and calls % your get method for each scanline of the view. The pixel extent is % not confined to the image canvas-- that is you can include negative offsets % or widths or heights that exceed the image dimension. Any updates to % the pixels in your callback are ignored. 
%
%  The callback signature is:
%
%      MagickBooleanType GetImageViewMethod(const ImageView *source,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback get method that must be
%  executed by a single thread at a time.
%
%  The format of the GetImageViewIterator method is:
%
%      MagickBooleanType GetImageViewIterator(ImageView *source,
%        GetImageViewMethod get,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source image view.
%
%    o get: the get callback method.
%
%    o context: the user defined context.
%
*/
MagickExport MagickBooleanType GetImageViewIterator(ImageView *source,
  GetImageViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickSignature);
  /* A NULL callback is an error, not a no-op */
  if (get == (GetImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(source->number_threads)
#endif
  /* NOTE(review): loop bound is extent.height, not extent.y+extent.height;
     a view with a non-zero y offset visits fewer rows than its height --
     confirm this is the intended contract */
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const PixelPacket
      *pixels;

    /* Once any thread fails, remaining iterations bail out cheaply */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize the shared progress counter */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_GetImageViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i e w V i r t u a l I n d e x e s                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewVirtualIndexes() returns the image view virtual indexes.
%
%  The format of the GetImageViewVirtualIndexes method is:
%
%      const IndexPacket *GetImageViewVirtualIndexes(
%        const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport const IndexPacket *GetImageViewVirtualIndexes(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickSignature);
  /* Read-only index queue of the underlying cache view */
  return(GetCacheViewVirtualIndexQueue(image_view->view));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i e w V i r t u a l P i x e l s                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewVirtualPixels() returns the image view virtual pixels.
%
%  The format of the GetImageViewVirtualPixels method is:
%
%      const PixelPacket *GetImageViewVirtualPixels(const ImageView *image_view)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
*/
MagickExport const PixelPacket *GetImageViewVirtualPixels(
  const ImageView *image_view)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickSignature);
  /* Read-only pixel queue of the underlying cache view */
  return(GetCacheViewVirtualPixelQueue(image_view->view));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e V i e w                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageView() returns MagickTrue if the parameter is verified as an image
%  view object.
% % The format of the IsImageView method is: % % MagickBooleanType IsImageView(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport MagickBooleanType IsImageView(const ImageView *image_view) { if (image_view == (const ImageView *) NULL) return(MagickFalse); if (image_view->signature != MagickSignature) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w I m a g e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewImageView() returns a image view required for all other methods in the % Image View API. % % The format of the NewImageView method is: % % ImageView *NewImageView(MagickCore *wand) % % A description of each parameter follows: % % o wand: the wand. % */ MagickExport ImageView *NewImageView(Image *image) { ImageView *image_view; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); image_view=(ImageView *) AcquireMagickMemory(sizeof(*image_view)); if (image_view == (ImageView *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(image_view,0,sizeof(*image_view)); image_view->description=ConstantString("ImageView"); image_view->image=image; image_view->view=AcquireCacheView(image_view->image); image_view->extent.width=image->columns; image_view->extent.height=image->rows; image_view->extent.x=0; image_view->extent.y=0; image_view->number_threads=GetOpenMPMaximumThreads(); image_view->exception=AcquireExceptionInfo(); image_view->debug=IsEventLogging(); image_view->signature=MagickSignature; return(image_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w I m a g e V i e w R e g i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewImageViewRegion() 
returns an image view required for all other methods
%  in the Image View API.
%
%  The format of the NewImageViewRegion method is:
%
%      ImageView *NewImageViewRegion(Image *image,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,width,height: These values define the perimeter of an extent of
%      the pixel view.
%
*/
MagickExport ImageView *NewImageViewRegion(Image *image,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  ImageView
    *image_view;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  image_view=(ImageView *) AcquireMagickMemory(sizeof(*image_view));
  if (image_view == (ImageView *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(image_view,0,sizeof(*image_view));
  image_view->description=ConstantString("ImageView");
  /* Bug fix: the image member must be assigned BEFORE the cache view is
     acquired from it.  The original code called
     AcquireCacheView(image_view->image) while image_view->image was still
     NULL (the struct was just zeroed by ResetMagickMemory), acquiring a
     cache view on a NULL image.  NewImageView() uses the correct order. */
  image_view->image=image;
  image_view->view=AcquireCacheView(image_view->image);
  image_view->extent.width=width;
  image_view->extent.height=height;
  image_view->extent.x=x;
  image_view->extent.y=y;
  image_view->number_threads=GetOpenMPMaximumThreads();
  image_view->exception=AcquireExceptionInfo();
  image_view->debug=IsEventLogging();
  image_view->signature=MagickSignature;
  return(image_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e V i e w D e s c r i p t i o n                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageViewDescription() associates a description with an image view.
%
%  The format of the SetImageViewDescription method is:
%
%      void SetImageViewDescription(ImageView *image_view,
%        const char *description)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
%    o description: the image view description.
%
*/
MagickExport void SetImageViewDescription(ImageView *image_view,
  const char *description)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickSignature);
  /* NOTE(review): any previously assigned description string is not
     relinquished before being overwritten here -- presumably callers set it
     at most once; confirm repeated calls do not leak */
  image_view->description=ConstantString(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e V i e w I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageViewIterator() iterates over the image view in parallel and calls
%  your set method for each scanline of the view.  The pixel extent is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.  The pixels are initially
%  undefined and any settings you make in the callback method are
%  automagically synced back to your image.
%
%  The callback signature is:
%
%      MagickBooleanType SetImageViewMethod(ImageView *destination,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetImageViewIterator method is:
%
%      MagickBooleanType SetImageViewIterator(ImageView *destination,
%        SetImageViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the image view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
MagickExport MagickBooleanType SetImageViewIterator(ImageView *destination,
  SetImageViewMethod set,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(destination != (ImageView *) NULL);
  assert(destination->signature == MagickSignature);
  /* A NULL callback is an error, not a no-op */
  if (set == (SetImageViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->image;
  /* Pixels are about to be written directly; the image must be DirectClass */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(destination->number_threads)
#endif
  /* NOTE(review): loop bound is extent.height, not extent.y+extent.height;
     a view with a non-zero y offset visits fewer rows than its height --
     confirm this is the intended contract */
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register PixelPacket
      *restrict pixels;

    /* Once any thread fails, remaining iterations bail out cheaply */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Push the callback's pixel writes back to the pixel cache */
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize the shared progress counter */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SetImageViewIterator)
#endif
        proceed=SetImageProgress(destination_image,destination->description,
          progress++,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   I m a g e   V i e w   T
h r e a d s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageViewThreads() sets the number of threads in a thread team.
%
%  The format of the SetImageViewThreads method is:
%
%      void SetImageViewThreads(ImageView *image_view,
%        const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o image_view: the image view.
%
%    o number_threads: the number of threads in a thread team.
%
*/
MagickExport void SetImageViewThreads(ImageView *image_view,
  const size_t number_threads)
{
  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickSignature);
  /* Clamp the request to the OpenMP maximum thread count */
  image_view->number_threads=number_threads;
  if (image_view->number_threads > GetOpenMPMaximumThreads())
    image_view->number_threads=GetOpenMPMaximumThreads();
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f e r I m a g e V i e w I t e r a t o r                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferImageViewIterator() iterates over two image views in parallel and
%  calls your transfer method for each scanline of the view.  The source pixel
%  extent is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination image view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image dimension
%  are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType TransferImageViewMethod(const ImageView *source,
%        ImageView *destination,const ssize_t y,const int thread_id,
%        void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
% % The format of the TransferImageViewIterator method is: % % MagickBooleanType TransferImageViewIterator(ImageView *source, % ImageView *destination,TransferImageViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source image view. % % o destination: the destination image view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ MagickExport MagickBooleanType TransferImageViewIterator(ImageView *source, ImageView *destination,TransferImageViewMethod transfer,void *context) { ExceptionInfo *exception; Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (ImageView *) NULL); assert(source->signature == MagickSignature); if (transfer == (TransferImageViewMethod) NULL) return(MagickFalse); source_image=source->image; destination_image=destination->image; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(source->number_threads) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const PixelPacket *restrict pixels; register PixelPacket *restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } if (transfer(source,destination,y,id,context) == MagickFalse) status=MagickFalse; 
sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransferImageViewIterator) #endif proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U p d a t e I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UpdateImageViewIterator() iterates over the image view in parallel and calls % your update method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension are permitted. Updates to pixels % in your callback are automagically synced back to the image. % % The callback signature is: % % MagickBooleanType UpdateImageViewMethod(ImageView *source, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback update method that must be % executed by a single thread at a time. % % The format of the UpdateImageViewIterator method is: % % MagickBooleanType UpdateImageViewIterator(ImageView *source, % UpdateImageViewMethod update,void *context) % % A description of each parameter follows: % % o source: the source image view. % % o update: the update callback method. % % o context: the user defined context. 
%
*/
MagickExport MagickBooleanType UpdateImageViewIterator(ImageView *source,
  UpdateImageViewMethod update,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickSignature);
  /* A NULL callback is an error, not a no-op */
  if (update == (UpdateImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  /* Pixels are updated in place; the image must be DirectClass */
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status) num_threads(source->number_threads)
#endif
  /* NOTE(review): loop bound is extent.height, not extent.y+extent.height;
     a view with a non-zero y offset visits fewer rows than its height --
     confirm this is the intended contract */
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register PixelPacket
      *restrict pixels;

    /* Once any thread fails, remaining iterations bail out cheaply */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
        continue;
      }
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Push the callback's pixel updates back to the pixel cache */
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize the shared progress counter */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_UpdateImageViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
nco_ply_lst.c
/* $Header$ */

/* Purpose: Functions that manipulate lists of polygons */

/* Copyright (C) 2018--present Charlie Zender
   This file is part of NCO, the netCDF Operators. NCO is free software.
   You may redistribute and/or modify NCO under the terms of the
   3-Clause BSD License with exceptions described in the LICENSE file */

#include "nco_ply_lst.h"

void
nco_poly_re_org_lst( /* for each poly_sct* in list re-order points so that first point is the leftermost point */
poly_sct **pl_lst,
int arr_nbr)
{
  int idx=0;
  int jdx=0;
  int max_crn_nbr=0;

  /* scratch buffers sized to the largest polygon in the list */
  double *lcl_dp_x;
  double *lcl_dp_y;

  /* max crn_nbr */
  for(idx=0 ; idx<arr_nbr ;idx++)
    if( pl_lst[idx]->crn_nbr > max_crn_nbr )
      max_crn_nbr = pl_lst[idx]->crn_nbr;

  lcl_dp_x=(double*)nco_calloc(max_crn_nbr, sizeof(double));
  lcl_dp_y=(double*)nco_calloc(max_crn_nbr, sizeof(double));

  for(idx=0; idx<arr_nbr; idx++)
  {
    int lcl_min=0;
    int crn_nbr=pl_lst[idx]->crn_nbr;
    double x_min;

    /* de-reference */
    poly_sct *pl=pl_lst[idx];

    /* degenerate polygon: nothing to re-order */
    if( crn_nbr < 1 )
      continue;

    /* find index of min X value.
       Bug fix: the search was seeded with x_min=1.0e-30, so any vertex with
       longitude > 1e-30 could never win the minimum search and polygons on
       [0,360) grids were silently never re-ordered.  Seed with the first
       vertex instead, which is correct for any coordinate convention. */
    x_min=pl->dp_x[0];
    for(jdx=1; jdx<crn_nbr; jdx++)
      if( pl->dp_x[jdx] < x_min )
      { x_min=pl->dp_x[jdx]; lcl_min=jdx;}

    /* first point already x_min so do nothing */
    if( lcl_min == 0)
      continue;

    /* rotate vertex list so that the leftmost vertex comes first */
    for(jdx=0; jdx<crn_nbr; jdx++)
    {
      lcl_dp_x[jdx]=pl->dp_x[(jdx+lcl_min)%crn_nbr];
      lcl_dp_y[jdx]=pl->dp_y[(jdx+lcl_min)%crn_nbr];
    }

    /* copy over values */
    memcpy(pl->dp_x, lcl_dp_x, (size_t)crn_nbr*sizeof(double));
    memcpy(pl->dp_y, lcl_dp_y, (size_t)crn_nbr*sizeof(double));
  }

  lcl_dp_x=(double*)nco_free(lcl_dp_x);
  lcl_dp_y=(double*)nco_free(lcl_dp_y);

  return;
}

poly_sct **             /* [O] [nbr] size of array */
nco_poly_lst_mk(
double *area, /* I [sr] Area of source grid */
int *msk, /* I [flg] Mask on source grid */
double *lat_ctr, /* I [dgr] Latitude centers of source grid */
double *lon_ctr, /* I [dgr] Longitude centers of source grid */
double *lat_crn, /* I [dgr] Latitude corners of source grid */
double *lon_crn, /* I [dgr] Longitude corners of source grid */
size_t grd_sz, /* I [nbr] Number of elements in single layer
of source grid */
long grd_crn_nbr, /* I [nbr] Maximum number of corners in source gridcell */
nco_grd_lon_typ_enm grd_lon_typ, /* I [num] if not nil then split cells that straddle Greenwich or Dateline */
poly_typ_enm pl_typ,
int *pl_nbr) /* O [nbr] Number of polygons in returned list */
{
  const char fnc_nm[]="nco_poly_lst_mk()";

  int idx=0;
  int idx_cnt=0;        /* count of polygons actually stored in output list */
  int cnt_wrp_good=0;   /* count of wrapped cells successfully split */

  nco_bool bwrp;

  double *lat_ptr=lat_crn;
  double *lon_ptr=lon_crn;

  /* buffers used in nco-poly_re_org() */
  double lcl_dp_x[VP_MAX]={0};
  double lcl_dp_y[VP_MAX]={0};

  poly_sct *pl;
  poly_sct *pl_wrp_left;
  poly_sct *pl_wrp_right;
  poly_sct **pl_lst;

  /* start with twice the grid size as we may be splitting the cells along the Greenwich meridian or dateline */
  /* realloc at the end */
  pl_lst=(poly_sct**)nco_malloc( sizeof (poly_sct*) * grd_sz *2 );

  // printf("About to print poly sct grd_sz=%d grd_crn_nbr=%d\n", grd_sz, grd_crn_nbr);

  for(idx=0;idx<grd_sz; idx++)
  {
    /* check mask and area: masked-out or zero-area cells are skipped */
    if( msk[idx]==0 || area[idx] == 0.0)
      continue;

    pl=nco_poly_init_lst(pl_typ, grd_crn_nbr,0, idx, lon_ptr, lat_ptr);

    /* advance corner pointers regardless of whether the cell was usable */
    lon_ptr+=(size_t)grd_crn_nbr;
    lat_ptr+=(size_t)grd_crn_nbr;

    /* if poly is less than a triangle then null is returned*/
    if(!pl)
      continue;

    /* add min max */
    nco_poly_minmax_add(pl, grd_lon_typ, False);

    nco_poly_re_org(pl, lcl_dp_x, lcl_dp_y);

    /* use Charlie's formula */
    nco_poly_area_add(pl);

    /* sanity check: cell narrower than 180 degrees and center inside its own
       lon bounding box; anything else is reported and dropped */
    //if(pl->dp_x_minmax[0] <0.0 || (pl->dp_x_minmax[1] - pl->dp_x_minmax[0]) > 30 )
    if( !(pl->dp_x_minmax[1] - pl->dp_x_minmax[0] < 180.0 && lon_ctr[idx] >= pl->dp_x_minmax[0] && lon_ctr[idx] <= pl->dp_x_minmax[1] ))
    {
      (void)fprintf(stdout, "/***%s: %s: invalid polygon to follow *******?", nco_prg_nm_get(), fnc_nm);
      nco_poly_prn(pl, 0);
      pl=nco_poly_free(pl);
      continue;
    }

    //fprintf(stdout,"/***** input polygon pl lon center=%f convex=%s\n ********************/\n", lon_ctr[idx], (nco_poly_is_convex(pl) ? "True": "False") );
    //nco_poly_prn(pl, 0);

    /* check for wrapping -center outside min/max range */
    bwrp=( lon_ctr[idx] < pl->dp_x_minmax[0]  || lon_ctr[idx] > pl->dp_x_minmax[1] );

    /* no grid type specified: wrapped cells cannot be split, so drop them */
    if( grd_lon_typ == nco_grd_lon_nil || grd_lon_typ == nco_grd_lon_unk )
    {
      if( !bwrp )
      {
        pl_lst[idx_cnt++]=pl;
      }
      else
      {
        (void)fprintf(stdout, "%s: polygon(%d) wrapped - but grd_lon_typ not specified \n", nco_prg_nm_get(), idx);
        (void)fprintf(stdout, "/*******************************************/\n");
        pl=nco_poly_free(pl);
      }
      continue;
    }

    /* if we are here then grd_lon_typ specifys a grid type */
    if( !bwrp)
    {
      pl_lst[idx_cnt++]=pl;
    }
    /* cell width exceeds max so assume wrapping: split into left/right halves
       and keep whichever pieces survive; original cell is freed */
    else if( nco_poly_wrp_splt(pl, grd_lon_typ, &pl_wrp_left, &pl_wrp_right ) == NCO_NOERR )
    {
      fprintf(stdout,"/***** pl, wrp_left, wrp_right ********************/\n");

      if(pl_wrp_left)
      {
        nco_poly_re_org(pl_wrp_left, lcl_dp_x, lcl_dp_y);
        pl_lst[idx_cnt++]=pl_wrp_left;
        nco_poly_prn(pl_wrp_left, 2);
      }

      if(pl_wrp_right)
      {
        nco_poly_re_org(pl_wrp_right, lcl_dp_x, lcl_dp_y);
        pl_lst[idx_cnt++]=pl_wrp_right;
        nco_poly_prn(pl_wrp_right, 2);
      }

      pl=nco_poly_free(pl);
      fprintf(stdout,"/**********************************/\n");
      cnt_wrp_good++;
    }
    else
    {
      if(nco_dbg_lvl_get() >= nco_dbg_std ){
        (void)fprintf(stdout, "%s: split wrapping didn't work on this polygon(%d)\n", nco_prg_nm_get(), idx );
        (void)fprintf(stdout, "/********************************/\n");
      }
      pl=nco_poly_free(pl);
    }
  }

  if(nco_dbg_lvl_get() >= nco_dbg_std )
    (void)fprintf(stdout, "%s: %s size input list(%lu), size output list(%d), num of split polygons(%d)\n", nco_prg_nm_get(),fnc_nm, grd_sz, idx_cnt, cnt_wrp_good);

  /* shrink list to the number of polygons actually kept */
  pl_lst=(poly_sct**)nco_realloc( pl_lst, (size_t)idx_cnt * sizeof (poly_sct*) );

  *pl_nbr=idx_cnt;

  return pl_lst;
}

poly_sct **             /* [O] [nbr] size of array */
nco_poly_lst_mk_rll(
double *area, /* I [sr] Area of source grid */
int *msk, /* I [flg] Mask on source grid */
double *lat_ctr, /* I [dgr] Latitude centers of source grid */
double *lon_ctr, /* I [dgr] Longitude
centers of source grid */
double *lat_crn, /* I [dgr] Latitude corners of source grid */
double *lon_crn, /* I [dgr] Longitude corners of source grid */
size_t grd_sz, /* I [nbr] Number of elements in single layer of source grid */
long grd_crn_nbr, /* I [nbr] Maximum number of corners in source gridcell */
nco_grd_lon_typ_enm grd_lon_typ) /* I [num] */
{
  int idx=0;
  int wrp_cnt=0;    /* count of wrapped cells (debugging) */
  int wrp_y_cnt=0;  /* count of polar caps (always 0 on an RLL grid) */
  int msk_cnt=0;    /* count of masked/invalid cells */

  const char fnc_nm[]="nco_poly_lst_mk_rll()";

  nco_bool bwrp;
  /* no polar caps on a RLL grid */
  nco_bool bchk_caps=False;

  double tot_area=0.0;

  double *lat_ptr=lat_crn;
  double *lon_ptr=lon_crn;

  poly_sct *pl=(poly_sct*)NULL_CEWI;
  /* contains plain struct sct with bmsk=False; */
  poly_sct *pl_msk=((poly_sct*)NULL_CEWI);
  poly_sct **pl_lst;

  /* list size is grd_sz - invalid or masked polygons are repesented by a poly_sct->bmsk=False */
  pl_lst=(poly_sct**)nco_malloc( sizeof (poly_sct*) * grd_sz);

  /* template placeholder duplicated into every masked/invalid slot */
  pl_msk=nco_poly_init();
  pl_msk->bmsk=False;

  /* filter out wrapped lon cells */
  if( grd_lon_typ == nco_grd_lon_nil ||  grd_lon_typ == nco_grd_lon_unk  || grd_lon_typ == nco_grd_lon_bb )
    bwrp=False;
  else
    bwrp=True;

  // printf("About to print poly sct grd_sz=%d grd_crn_nbr=%d\n", grd_sz, grd_crn_nbr);

  for(idx=0;idx<grd_sz; idx++)
  {
    /* check mask and area: keep a placeholder so the list stays index-aligned
       with the source grid */
    if( msk[idx]==0 || area[idx] == 0.0 )
    {
      pl_lst[idx]= nco_poly_dpl(pl_msk);
      msk_cnt++;
      continue;
    }

    pl=nco_poly_init_lst(poly_rll, grd_crn_nbr,0, idx, lon_ptr, lat_ptr);

    /* advance corner pointers regardless of cell validity */
    lon_ptr+=(size_t)grd_crn_nbr;
    lat_ptr+=(size_t)grd_crn_nbr;

    /* if poly is less than a triangle then null is returned*/
    if(!pl )
    {
      if(nco_dbg_lvl_get()>= nco_dbg_dev)
        fprintf(stderr, "%s(): WARNING cell(id=%d) less than a triange\n", fnc_nm, idx);

      pl_lst[idx]= nco_poly_dpl(pl_msk);
      msk_cnt++;
      continue;
    }

    /* add centroid from input */
    pl->dp_x_ctr=lon_ctr[idx];
    pl->dp_y_ctr=lat_ctr[idx];

    /* pop shp */
    nco_poly_shp_pop(pl);

    /* add min max */
    nco_poly_minmax_add(pl, grd_lon_typ, bchk_caps);

    /* if coords cannot deal with wrapping */
    if( pl->bwrp && bwrp==False )
    {
      pl=nco_poly_free(pl);
      pl_lst[idx]= nco_poly_dpl(pl_msk);
      msk_cnt++;
      continue;
    }

    /* The area of an RLL grid needs to be re-calculated as we have to take account of lines of latitude as great circles */
    nco_poly_area_add(pl);

    /* area NOT set so add to the area - nb this will be eventually written to the map file in nco_map_mk */
    if(area[idx]==-1.0)
      area[idx]=pl->area;

    /* simple center of a rll cell - should always be inside of polygon */
    nco_poly_ctr_add(pl, grd_lon_typ);

    if(nco_dbg_lvl_get()>= nco_dbg_dev )
      if(pl->bwrp)
        nco_poly_prn(pl,0);

    /* for debugging */
    tot_area+=pl->area;

    /* for debugging total number of wrapped cells */
    wrp_cnt+=pl->bwrp;

    pl_lst[idx]=pl;
  }

  if(nco_dbg_lvl_get() >= nco_dbg_dev )
    (void)fprintf(stderr, "%s: %s size input list(%lu), size output list(%lu) total area=%.15e num wrapped= %d num caps=%d num masked=%d\n", nco_prg_nm_get(),fnc_nm, grd_sz, grd_sz, tot_area, wrp_cnt, wrp_y_cnt, msk_cnt);

  pl_msk=nco_poly_free(pl_msk);

  return pl_lst;
}

poly_sct **             /* [O] [nbr] size of array */
nco_poly_lst_mk_sph(
double *area, /* I [sr] Area of source grid */
int *msk, /* I [flg] Mask on source grid */
double *lat_ctr, /* I [dgr] Latitude centers of source grid */
double *lon_ctr, /* I [dgr] Longitude centers of source grid */
double *lat_crn, /* I [dgr] Latitude corners of source grid */
double *lon_crn, /* I [dgr] Longitude corners of source grid */
size_t grd_sz, /* I [nbr] Number of elements in single layer of source grid */
long grd_crn_nbr, /* I [nbr] Maximum number of corners in source gridcell */
nco_grd_lon_typ_enm grd_lon_typ) /* I [num] */
{
  int idx=0;
  int wrp_cnt=0;    /* count of wrapped cells (debugging) */
  int wrp_y_cnt=0;  /* count of polar caps (debugging) */
  int msk_cnt=0;    /* count of masked/invalid cells */

  const char fnc_nm[]="nco_poly_lst_mk_sph()";

  nco_bool bwrp;
  /* check to see if cell is a polar cap */
  nco_bool bchk_caps=True;

  double tot_area=0.0;

  double *lat_ptr=lat_crn;
  double *lon_ptr=lon_crn;

  /* buffers used in nco-poly_re_org() */
  poly_sct *pl=(poly_sct*)NULL_CEWI;
  /* contains plain struct sct with bmsk=False; */
  poly_sct
*pl_msk=((poly_sct*)NULL_CEWI); poly_sct **pl_lst; /* list size is grd_sz - invalid or masked polygons are repesented by a poly_sct->bmsk=False */ pl_lst=(poly_sct**)nco_malloc( sizeof (poly_sct*) * grd_sz); pl_msk=nco_poly_init(); pl_msk->bmsk=False; /* filter out wrapped lon cells */ if( grd_lon_typ == nco_grd_lon_nil || grd_lon_typ == nco_grd_lon_unk || grd_lon_typ == nco_grd_lon_bb ) bwrp=False; else bwrp=True; // printf("About to print poly sct grd_sz=%d grd_crn_nbr=%d\n", grd_sz, grd_crn_nbr); for(idx=0;idx<grd_sz; idx++) { /* check mask and area */ if( msk[idx]==0 || area[idx] == 0.0 ) { pl_lst[idx]= nco_poly_dpl(pl_msk); msk_cnt++; continue; } pl=nco_poly_init_lst(poly_sph, grd_crn_nbr,0, idx, lon_ptr, lat_ptr); lon_ptr+=(size_t)grd_crn_nbr; lat_ptr+=(size_t)grd_crn_nbr; /* if poly is less than a triangle then null is returned*/ if(!pl ) { if(nco_dbg_lvl_get()>= nco_dbg_dev) fprintf(stderr, "%s(): WARNING cell(id=%d) less than a triange\n", fnc_nm, idx); pl_lst[idx]= nco_poly_dpl(pl_msk); msk_cnt++; continue; } /* add centroid from input */ pl->dp_x_ctr=lon_ctr[idx]; pl->dp_y_ctr=lat_ctr[idx]; /* pop shp */ nco_poly_shp_pop(pl); /* add min max */ nco_poly_minmax_add(pl, grd_lon_typ, bchk_caps); /* if coords cannot deal with wrapping */ if( pl->bwrp && bwrp==False ) { pl=nco_poly_free(pl); pl_lst[idx]= nco_poly_dpl(pl_msk); msk_cnt++; continue; } /* The area of an RLL grid needs to be re-calculated as we have to take account of lines of latitude as great circles */ nco_poly_area_add(pl); /* area NOT set so add to the area - nb this will be eventually written to the map file in nco_map_mk */ if(area[idx]==-1.0) area[idx]=pl->area; /* fxm:2019-06-07 - there is a problem using the polygon center as a control point as * for some RLL grids the center of a polar triangle can be the pole */ /* The centroid can be outside of a convex polygon * for nco_sph_intersect_pre() to work correctly it requires a point INSIDE the convex polygon * So we use a custom function : 
 * just remember FROM HERE on that the pl->dp_x_ctr, pl->dp_y_ctr iS NOT the Centroid
 * */

/* if(nco_sph_inside_mk(pl,pControl )) { pl->dp_x_ctr=R2D(pControl[3]); pl->dp_y_ctr=R2D(pControl[4]); } else nco_poly_ctr_add(pl, grd_lon_typ); */

  /* wrapped cells are printed for debugging only */
  if(nco_dbg_lvl_get()>= nco_dbg_dev )
    if(pl->bwrp)
      nco_poly_prn(pl,0);

  /* for debugging */
  tot_area+=pl->area;

  /* for debugging total number of wrapped cells */
  wrp_cnt+=pl->bwrp;
  wrp_y_cnt+=pl->bwrp_y;

  pl_lst[idx]=pl;

}

if(nco_dbg_lvl_get() >= nco_dbg_dev )
  (void)fprintf(stderr, "%s: %s size input list(%lu), size output list(%lu) total area=%.15e num wrapped= %d num caps=%d num masked=%d\n", nco_prg_nm_get(),fnc_nm, grd_sz, grd_sz, tot_area, wrp_cnt, wrp_y_cnt, msk_cnt);

/* free the template polygon used for masked/invalid cells */
pl_msk=nco_poly_free(pl_msk);

return pl_lst;

}

/* Free a polygon list: frees every member polygon, then the list itself.
   nco_free() returns NULL, so callers may safely write
   pl_lst=nco_poly_lst_free(pl_lst,nbr); */
poly_sct **
nco_poly_lst_free(
poly_sct **pl_lst,
int arr_nbr)
{
  int idx;

  for(idx=0; idx<arr_nbr; idx++)
    pl_lst[idx]=nco_poly_free(pl_lst[idx]);

  pl_lst=(poly_sct**)nco_free(pl_lst);

  return pl_lst;
}

/* Reset a KDPriority candidate list before a KD-tree search:
   sentinel distance 1.1 and no element attached to each slot */
void
nco_poly_set_priority(
int nbr_lst,
KDPriority *list){

  int idx;

  for(idx=0;idx<nbr_lst;idx++){
    list[idx].dist = 1.1;
    list[idx].elem = (KDElem*)NULL;
  }

  return ;
}

/* Build the overlap (intersection) mesh between the polygons in pl_lst_in
   and the polygons stored in the KD-tree rtree, using Cartesian clipping.
   Returns a freshly allocated list of overlap polygons; the list size is
   written to *pl_cnt_vrl_ret. */
poly_sct **
nco_poly_lst_mk_vrl_crt(  /* create overlap mesh for crt polygons */
poly_sct **pl_lst_in,
int pl_cnt_in,
KDTree *rtree,
int *pl_cnt_vrl_ret){

/* just duplicate output list to overlap */
size_t idx;
size_t jdx;

/* maximum number of KD-tree candidates examined per input polygon */
int max_nbr_vrl=1000;
int pl_cnt_vrl=0;
//nco_bool bSort=True;

const char fnc_nm[]="nco_poly_mk_vrl_crt()";

/* buffers used in nco-poly_re_org() */
double lcl_dp_x[VP_MAX]={0};
double lcl_dp_y[VP_MAX]={0};

kd_box size;

poly_sct ** pl_lst_vrl=NULL_CEWI;

KDPriority *list;

list = (KDPriority *)nco_calloc(sizeof(KDPriority),(size_t)max_nbr_vrl);

printf("INFO - entered function nco_poly_mk_vrl\n");

/* start main loop over input polygons */
for(idx=0 ; idx<pl_cnt_in ;idx++ )
{
  int cnt_vrl=0;
  int cnt_vrl_on=0;

  (void)nco_poly_set_priority(max_nbr_vrl,list);

  /* get bounds of polygon in */
  size[KD_LEFT] = pl_lst_in[idx]->dp_x_minmax[0];
  size[KD_RIGHT] =
pl_lst_in[idx]->dp_x_minmax[1]; size[KD_BOTTOM] = pl_lst_in[idx]->dp_y_minmax[0]; size[KD_TOP] = pl_lst_in[idx]->dp_y_minmax[1]; /* find overlapping polygons */ // cnt_vrl=kd_nearest_intersect(rtree, size, max_nbr_vrl,list,bSort ); /* nco_poly_prn(2, pl_lst_in[idx] ); */ for(jdx=0; jdx <cnt_vrl ;jdx++){ poly_sct *pl_vrl=(poly_sct*)NULL_CEWI; poly_sct *pl_out=(poly_sct*)list[jdx].elem->item; ; // nco_poly_prn(2, pl_out); /* check for polygon in polygon first */ if( nco_crt_poly_in_poly(pl_lst_in[idx], pl_out) == pl_out->crn_nbr ) { //fprintf(stderr,"%s: using poly_in_poly()\n", fnc_nm); pl_vrl=nco_poly_dpl(pl_out); } else pl_vrl= nco_poly_vrl_do(pl_lst_in[idx], pl_out, 0, (char*)NULL); if(pl_vrl){ nco_poly_re_org(pl_vrl, lcl_dp_x, lcl_dp_y); /* add area */ nco_poly_area_add(pl_vrl); /* shp not needed */ nco_poly_shp_free(pl_vrl); pl_lst_vrl=(poly_sct**)nco_realloc(pl_lst_vrl, sizeof(poly_sct*) * (pl_cnt_vrl+1)); pl_lst_vrl[pl_cnt_vrl]=pl_vrl; pl_cnt_vrl++; cnt_vrl_on++; if(nco_poly_is_convex(pl_vrl) == False ) { fprintf(stderr,"%s: %s vrl polygon convex=0 vrl ,in convex=%d ,out convex=%d\n", nco_prg_nm_get(), fnc_nm, nco_poly_is_convex(pl_lst_in[idx]), nco_poly_is_convex(pl_out) ); nco_poly_prn(pl_vrl, 2); nco_poly_prn(pl_lst_in[idx], 2); nco_poly_prn(pl_out, 2); } //fprintf(stderr,"Overlap polygon to follow\n"); //nco_poly_prn(2, pl_vrl); } } if( nco_dbg_lvl_get() >= nco_dbg_dev ) (void) fprintf(stderr, "%s: total overlaps=%d for polygon %lu - potential overlaps=%d actual overlaps=%d\n", nco_prg_nm_get(), pl_cnt_vrl, idx, cnt_vrl, cnt_vrl_on); } list = (KDPriority *)nco_free(list); /* return size of list */ *pl_cnt_vrl_ret=pl_cnt_vrl; return pl_lst_vrl; } void ** nco_poly_lst_mk_vrl( /* create overlap mesh for sph polygons */ poly_sct **pl_lst_in, int pl_cnt_in, nco_grd_lon_typ_enm grd_lon_typ, poly_typ_enm pl_typ, KDTree **tree, int nbr_tr, int lst_out_typ, int *pl_cnt_vrl_ret){ /* just duplicate output list to overlap */ const char 
fnc_nm[]="nco_poly_lst_mk_vrl()"; nco_bool bDirtyRats=False; nco_bool bSort=True; int pl_cnt_vrl=0; int thr_idx=0; int pl_cnt_dbg=0; int tot_nan_cnt=0; int tot_wrp_cnt=0; /* approx number of input cells each thread will process */ int thr_quota; /* reporting step */ int thr_quota_step; size_t idx; int lcl_thr_nbr; omp_mem_sct *mem_lst=NULL_CEWI; double tot_area=0.0; void** void_lst_vrl=NULL_CEWI; poly_sct** pl_lst_dbg=NULL_CEWI; FILE * const fp_stderr=stderr; lcl_thr_nbr=omp_get_max_threads(); mem_lst=(omp_mem_sct*)nco_malloc(sizeof(omp_mem_sct)*lcl_thr_nbr); for(idx=0;idx<lcl_thr_nbr;idx++) { mem_lst[idx].pl_lst=NULL_CEWI; mem_lst[idx].wgt_lst=NULL_CEWI; mem_lst[idx].blk_nbr=0; mem_lst[idx].pl_cnt=0; mem_lst[idx].kd_list=(KDPriority *)nco_calloc(sizeof(KDPriority),(size_t)(NCO_VRL_BLOCKSIZE)); mem_lst[idx].kd_cnt=0; mem_lst[idx].kd_blk_nbr=1; mem_lst[idx].idx_cnt=0; } thr_quota=pl_cnt_in/lcl_thr_nbr; thr_quota_step=thr_quota/20; if( thr_quota_step <2000 ) thr_quota_step=2000; /* NB: "OpenMP notes" section of nco_rgr.c has detailed discussion of these settings Henry, please keep the variables in alphabetical order within a clause and remember to update Intel */ #ifdef __GNUG__ # define GCC_LIB_VERSION ( __GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__ ) # if GCC_LIB_VERSION < 490 # define GXX_OLD_OPENMP_SHARED_TREATMENT 1 # endif /* 480 */ #endif /* !__GNUC__ */ #if defined(__INTEL_COMPILER) # pragma omp parallel for private(idx,thr_idx) schedule(dynamic,40) shared(bDirtyRats,bSort,grd_lon_typ,nbr_tr,pl_cnt_dbg,pl_typ,tree,tot_nan_cnt,tot_wrp_cnt) #else /* !__INTEL_COMPILER */ # ifdef GXX_OLD_OPENMP_SHARED_TREATMENT # pragma omp parallel for default(none) private(idx,thr_idx) shared(bDirtyRats,bSort,grd_lon_typ,nbr_tr,pl_cnt_dbg,pl_typ,tree,tot_nan_cnt,tot_wrp_cnt) # else /* !old g++ */ # pragma omp parallel for private(idx,thr_idx) schedule(dynamic,40) shared(bDirtyRats,bSort,grd_lon_typ,nbr_tr,pl_cnt_dbg,pl_typ,tree,tot_nan_cnt,tot_wrp_cnt) # endif /* 
!old g++ */ #endif /* !__INTEL_COMPILER */ for(idx=0 ; idx<pl_cnt_in ;idx++ ) { nco_bool bSplit=False; int vrl_cnt = 0; int vrl_cnt_on = 0; int nan_cnt=0; int wrp_cnt=0; size_t jdx; double vrl_area = 0.0; kd_box size1; kd_box size2; // (void) nco_poly_set_priority(max_nbr_vrl, list); thr_idx=omp_get_thread_num(); if (0 && nco_dbg_lvl_get() >= nco_dbg_dev) fprintf(fp_stderr, "%s(): idx=%lu thr=%d\n",fnc_nm, idx, thr_idx); if(pl_lst_in[idx]->bmsk==False) goto cont_msk; /* need to iterate mem_lst diagnostics at end of loop*/ mem_lst[thr_idx].kd_cnt=0; if(mem_lst[thr_idx].kd_blk_nbr >1) { mem_lst[thr_idx].kd_blk_nbr=1; mem_lst[thr_idx].kd_list=(KDPriority*)nco_free(mem_lst[thr_idx].kd_list); //mem_lst[thr_idx].kd_list=(KDPriority*)nco_realloc(mem_lst[idx].kd_list, sizeof(KDPriority) * NCO_VRL_BLOCKSIZE ); mem_lst[idx].kd_list=(KDPriority *)nco_calloc(sizeof(KDPriority),(size_t)(NCO_VRL_BLOCKSIZE)); } /* get bounds of polygon in */ bSplit=nco_poly_minmax_split(pl_lst_in[idx],grd_lon_typ, size1,size2 ); /* if a wrapped polygon then do two searches */ if(bSplit) vrl_cnt = kd_nearest_intersect_wrp(tree, nbr_tr, size1, size2, &mem_lst[thr_idx]); else vrl_cnt = kd_nearest_intersect(tree, nbr_tr, size1, &mem_lst[thr_idx], bSort); /* nco_poly_prn(2, pl_lst_in[idx] ); */ for (jdx = 0; jdx < vrl_cnt; jdx++) { poly_sct *pl_vrl = NULL_CEWI; poly_sct *pl_out = (poly_sct *) mem_lst[thr_idx].kd_list[jdx].elem->item; /* for area debug only */ mem_lst[thr_idx].kd_list[jdx].area=-1.0; mem_lst[thr_idx].kd_list[jdx].dbg_sng[0]='\0'; /* if (pl_lst_in[idx]->pl_typ != pl_out->pl_typ) { fprintf(stderr, "%s: %s poly type mismatch\n", nco_prg_nm_get(), fnc_nm); continue; } */ if(pl_typ== poly_rll) { pl_vrl = nco_poly_vrl_do(pl_lst_in[idx], pl_out, 0, (char*)NULL); /* if pl_vrl is NULL from, nco_poly_do_vrl() then there are 3 possible senario's * * 1) pl_lst_in[idx] and pl_out are seperate and distinct * * 2) pl_lst_in[idx] is entirely inside pl_out. 
* to check for this it is sufficent to check the * the minmax bounds and then also check that a single vertex * from pl_lst_in[idx] is inside pl_out * * 3) pl_out is entirely inside pl_lst_in[idx] * check minmax bounds then check a single vertex from * pl_out */ if(!pl_vrl) { if (nco_poly_in_poly_minmax(pl_lst_in[idx], pl_out)) pl_vrl = nco_poly_dpl(pl_out); else if (nco_poly_in_poly_minmax(pl_out, pl_lst_in[idx])) pl_vrl = nco_poly_dpl(pl_lst_in[idx]); } if(pl_vrl) { /* add area nb also sets wrapping */ nco_poly_minmax_add(pl_vrl, grd_lon_typ, False); /* REMEMBER poly_rll area uses minmax limits AND NOT VERTEX's */ nco_poly_area_add(pl_vrl); } } if(pl_typ== poly_sph ) { int lret = 0; int lret2=0; /* [flg] set by nco_sph_intersect_pre - if True it means that scan hase detected a genuine intersection so * so no need to do further processing */ nco_bool bGenuine=False; nco_bool bBadArea=False; char in_sng[VP_MAX]; char out_sng[VP_MAX]; int flg_snp_to=2; in_sng[0]='\0'; out_sng[0]='\0'; nco_sph_intersect_pre(pl_lst_in[idx], pl_out, in_sng); if(0 && nco_dbg_lvl_get() >= nco_dbg_dev) (void)fprintf(stderr,"%s:%s(): sp_sng=%s \n",nco_prg_nm_get(),fnc_nm, in_sng ); lret = nco_sph_process_pre(pl_out, in_sng, &bGenuine); switch(lret) { case 1: pl_vrl = nco_poly_dpl(pl_out); break; case 2: pl_vrl = nco_poly_vrl_do(pl_lst_in[idx], pl_out, flg_snp_to, (char *)NULL); break; case 3: pl_vrl = nco_poly_vrl_do(pl_lst_in[idx], pl_out, flg_snp_to, in_sng); break; case 4: pl_vrl = nco_poly_vrl_do(pl_lst_in[idx], pl_out, flg_snp_to, (char*)NULL); break; case 5: pl_vrl = nco_poly_vrl_do(pl_lst_in[idx], pl_out, flg_snp_to, in_sng); break; } if(pl_vrl) { double min_area= ( pl_out->area < pl_lst_in[idx]->area ? 
pl_out->area : pl_lst_in[idx]->area ); /* add area nb also sets wrapping */ nco_poly_minmax_add(pl_vrl, grd_lon_typ, False); /* REMEMBER poly_rll area uses minmax limits AND NOT VERTEX's */ nco_poly_area_add(pl_vrl); if( isnan( pl_vrl->area) || pl_vrl->area == 0.0 || (pl_vrl->area - min_area)/ min_area > 1.0e-04 ) { pl_vrl = nco_poly_free(pl_vrl); bBadArea=True; } } /* swap args around and try again */ if(!pl_vrl && ( (bGenuine==False && lret !=1 ) || bBadArea ) ) //if(!pl_vrl && ( (bGenuine==False && lret !=1 ) || bBadArea ) ) { flg_snp_to=1; nco_sph_intersect_pre(pl_out, pl_lst_in[idx], out_sng); lret2 = nco_sph_process_pre(pl_lst_in[idx], out_sng, &bGenuine); switch(lret2) { case 1: pl_vrl = nco_poly_dpl(pl_lst_in[idx]); break; case 2: pl_vrl = nco_poly_vrl_do(pl_out, pl_lst_in[idx], flg_snp_to, (char *)NULL); break; case 3: pl_vrl = nco_poly_vrl_do(pl_out, pl_lst_in[idx], flg_snp_to, out_sng); break; case 4: pl_vrl = nco_poly_vrl_do(pl_out, pl_lst_in[idx], flg_snp_to, (char*)NULL); break; case 5: pl_vrl = nco_poly_vrl_do(pl_out, pl_lst_in[idx], flg_snp_to, out_sng); break; } if(pl_vrl) { nco_poly_minmax_add(pl_vrl, grd_lon_typ, False); nco_poly_area_add(pl_vrl); } } if(bDirtyRats ) sprintf(mem_lst[thr_idx].kd_list[jdx].dbg_sng, "lret=%d in_sng=%s lret2=%d out_sng=%s\n",lret, in_sng, lret2, out_sng); if(bDirtyRats && pl_vrl && !nco_sph_is_convex(pl_vrl->shp, pl_vrl->crn_nbr) ) { (void) fprintf(stderr, "/************* concave overlap plygon***********/\n"); nco_poly_prn(pl_lst_in[idx],0); nco_poly_prn(pl_out,0); nco_poly_prn(pl_vrl, 0); (void) fprintf(stderr, "/***********************************************/\n"); } } /* end if poly_sph */ if (pl_vrl) { // nco_poly_re_org(pl_vrl, lcl_dp_x, lcl_dp_y); /* add aprropriate id's */ pl_vrl->src_id = pl_lst_in[idx]->src_id; pl_vrl->dst_id = pl_out->src_id; /* shp not needed */ nco_poly_shp_free(pl_vrl); /* calculate weight -simple ratio of areas */ pl_vrl->wgt=pl_vrl->area / pl_out->area; /* add lat/lon centers */ 
nco_poly_ctr_add(pl_vrl, grd_lon_typ); wrp_cnt+=pl_vrl->bwrp; if( isnan(pl_vrl->area) || pl_vrl->area ==0.0 ) { nan_cnt++; pl_vrl->area=0.0; pl_vrl=nco_poly_free(pl_vrl); continue; } /* for input polygon wgt is used to calculate frac_a */ pl_lst_in[idx]->wgt+= ( pl_vrl->area / pl_lst_in[idx]->area ); /* #ifdef _OPENMP #pragma omp critical #endif { pl_out->wgt+=pl_vrl->wgt; } */ vrl_area += pl_vrl->area; mem_lst[thr_idx].kd_list[jdx].area=pl_vrl->area; if( mem_lst[thr_idx].blk_nbr * NCO_VRL_BLOCKSIZE < mem_lst[thr_idx].pl_cnt +1 ){ if(lst_out_typ==1) mem_lst[thr_idx].wgt_lst= (wgt_sct**)nco_realloc(mem_lst[thr_idx].wgt_lst, sizeof(wgt_sct*) * ++mem_lst[thr_idx].blk_nbr * NCO_VRL_BLOCKSIZE ); else if(lst_out_typ==2) mem_lst[thr_idx].pl_lst= (poly_sct**)nco_realloc(mem_lst[thr_idx].pl_lst, sizeof(poly_sct*) * ++mem_lst[thr_idx].blk_nbr * NCO_VRL_BLOCKSIZE ); } if(lst_out_typ==1) { wgt_sct *wgt_lcl=(wgt_sct*)nco_calloc(1,sizeof(wgt_sct)); wgt_lcl->src_id=pl_vrl->src_id; wgt_lcl->dst_id=pl_vrl->dst_id; wgt_lcl->area=pl_vrl->area; wgt_lcl->wgt=pl_vrl->wgt; mem_lst[thr_idx].wgt_lst[mem_lst[thr_idx].pl_cnt++] =wgt_lcl; pl_vrl=nco_poly_free(pl_vrl); } else if(lst_out_typ==2 ) mem_lst[thr_idx].pl_lst[mem_lst[thr_idx].pl_cnt++] = pl_vrl; vrl_cnt_on++; /* for area debug only */ } } /* end jdx */ if (nco_dbg_lvl_get() >= nco_dbg_dev) { #ifdef _OPENMP #pragma omp critical #endif { tot_nan_cnt += nan_cnt; tot_wrp_cnt += wrp_cnt; tot_area += vrl_area; } /* end OMP critical */ /* area diff by more than 10% */ int kdx; double eps = 1e-8; double frc = vrl_area / pl_lst_in[idx]->area; if (frc < (1 - eps) || frc > 1 + eps) { (void) fprintf(fp_stderr, "%s: polygon %lu - potential overlaps=%d actual overlaps=%d area_in=%.10e vrl_area=%.10e adiff=%.15e bSplit=%d\n", nco_prg_nm_get(), idx, vrl_cnt, vrl_cnt_on, pl_lst_in[idx]->area, vrl_area, (pl_lst_in[idx]->area - vrl_area), bSplit); if (bDirtyRats) { //if (pl_lst_in[idx]->bwrp ) { if (1) { (void) fprintf(fp_stderr, "# /** following 
pl_lst_in[%lu] **/\n", idx); nco_poly_prn(pl_lst_in[idx], 0); (void) fprintf(fp_stderr, "# /** overlaps to follow **/\n"); for (kdx = 0; kdx < vrl_cnt; kdx++) { nco_poly_prn((poly_sct *) mem_lst[thr_idx].kd_list[kdx].elem->item, 0); (void)fprintf(fp_stderr, "# vrl_area=%.15e\n",mem_lst[thr_idx].kd_list[kdx].area ); (void)fprintf(fp_stderr, "# dbg_sng=%s\n",mem_lst[thr_idx].kd_list[kdx].dbg_sng ); } (void) fprintf(stderr, "/************* end dirty rats ***************/\n"); } if(1 && vrl_cnt_on>0 && lst_out_typ==2 ) { pl_lst_dbg = (poly_sct **) nco_realloc(pl_lst_dbg, sizeof(poly_sct *) * (pl_cnt_dbg + vrl_cnt_on)); /* write overlaps maybe */ for (kdx = 0; kdx < vrl_cnt_on; kdx++) { poly_sct * lcl_poly=mem_lst[thr_idx].pl_lst[mem_lst[thr_idx].pl_cnt - vrl_cnt_on + kdx]; if(lcl_poly->src_id== pl_lst_in[idx]->src_id ) pl_lst_dbg[pl_cnt_dbg++] = nco_poly_dpl(lcl_poly); } } } } } /* end dbg */ cont_msk: ; /* output some usefull tracking stuff - not debug but informative */ if ( ++mem_lst[thr_idx].idx_cnt % thr_quota_step == 0 && nco_dbg_lvl_get() >=3 ) (void)fprintf(fp_stderr, "%s: thread %d has processed %2.2f%% (%ld) of src cells quota and output %ld overlap cells\n", nco_prg_nm_get(), thr_idx, (float)mem_lst[thr_idx].idx_cnt/(float)thr_quota *100.0, mem_lst[thr_idx].idx_cnt, mem_lst[thr_idx].pl_cnt ); } /* end for idx */ /* turn tot_area into a % of 4*PI */ /* tot_area = tot_area / 4.0 / M_PI *100.0; */ /* final report */ if (nco_dbg_lvl_get() >= nco_dbg_dev) (void) fprintf(stderr, "%s: total overlaps=%d, total_area=%.15f (area=%3.10f%%) total num wrapped= %d total nan nbr=%d \n", nco_prg_nm_get(), pl_cnt_vrl, tot_area, tot_area /4.0 / M_PI *100.0, tot_wrp_cnt, tot_nan_cnt); /* write filtered polygons to file */ if(bDirtyRats && pl_cnt_dbg) { nco_msh_poly_lst_wrt("tst-wrt-dbg.nc", pl_lst_dbg, pl_cnt_dbg, grd_lon_typ,NC_FORMAT_NETCDF4); pl_lst_dbg=(poly_sct**)nco_poly_lst_free(pl_lst_dbg, pl_cnt_dbg); } /* concatenate memory list, place results into first member list 
*/ nco_mem_lst_cat(mem_lst, lcl_thr_nbr); /* free up kd_list's */ for(idx=0;idx<lcl_thr_nbr;idx++) mem_lst[idx].kd_list= (KDPriority*) nco_free(mem_lst[idx].kd_list); *pl_cnt_vrl_ret=mem_lst[0].pl_cnt; if(lst_out_typ==1) void_lst_vrl=(void**) mem_lst[0].wgt_lst; else if( lst_out_typ==2 ) void_lst_vrl=(void**) mem_lst[0].pl_lst; mem_lst=(omp_mem_sct*)nco_free(mem_lst); /* REMEMBER the void type can be a wgt_sct** array or a poly_sct** array * wgt_sct is a subset of poly_sct - with simple members */ return void_lst_vrl; } /* check areas - nb WARNING modifies area in pl_lst_in and pl_lst_out */ void nco_poly_lst_chk( poly_sct **pl_lst_in, int pl_cnt_in, poly_sct **pl_lst_out, int pl_cnt_out, poly_sct **pl_lst_vrl, int pl_cnt_vrl) { int id; int idx; int jdx; double epsilon=1.0e-8; const char fnc_nm[]="nco_poly_lst_chk()"; for(idx=0;idx<pl_cnt_vrl;idx++) { id=pl_lst_vrl[idx]->src_id; for(jdx=0;jdx<pl_cnt_in;jdx++) if(pl_lst_in[jdx]->src_id==id) break; if(jdx < pl_cnt_in ) pl_lst_in[jdx]->area-=pl_lst_vrl[idx]->area; } fprintf(stderr, "%s():WARNING following is list of incomplete src cells, by src_id no\n",fnc_nm); for(idx=0;idx<pl_cnt_in;idx++) if( fabs( pl_lst_in[idx]->area) > epsilon) fprintf(stderr, "src_id=%d area=%.10f\n", pl_lst_in[idx]->src_id, pl_lst_in[idx]->area ); for(idx=0;idx<pl_cnt_vrl;idx++) { id=pl_lst_vrl[idx]->dst_id; for(jdx=0;jdx<pl_cnt_out;jdx++) if(pl_lst_out[jdx]->src_id==id) break; if(jdx < pl_cnt_out ) pl_lst_out[jdx]->area-=pl_lst_vrl[idx]->area; } fprintf(stderr, "%s():WARNING following is list of incomplete dst cells, by src_id no\n",fnc_nm); for(idx=0;idx<pl_cnt_out;idx++) if( fabs( pl_lst_out[idx]->area) > epsilon) fprintf(stderr, "src_id=%d area=%.10f\n", pl_lst_out[idx]->src_id, pl_lst_out[idx]->area ); return; } poly_sct ** nco_poly_lst_chk_dbg( poly_sct **pl_lst, int pl_cnt, poly_sct **pl_lst_vrl, int pl_cnt_vrl, int io_flg, /* [flg] 0 - use src_id from vrl, 1 - use dst_id from vrl */ int *pl_cnt_dbg) /* size of output dbg grid */ { int 
id; int idx; int jdx; int pl_nbr_dbg=0; double epsilon=1.0e-12; double *area=NULL_CEWI; nco_bool is_lst_cnt=False; /* if true then pl_cnt matches max src_id There are no missing records from NetCDF SCRIP input */ is_lst_cnt=( pl_cnt== pl_lst[pl_cnt-1]->src_id +1); poly_sct **pl_lst_dbg=NULL_CEWI; const char fnc_nm[]="nco_poly_lst_chk_dbg()"; area=(double*)nco_malloc(sizeof(double)*pl_cnt); for(idx=0;idx<pl_cnt;idx++) area[idx]=pl_lst[idx]->area; for(idx=0;idx<pl_cnt_vrl;idx++) { id = (io_flg ? pl_lst_vrl[idx]->dst_id : pl_lst_vrl[idx]->src_id); if(is_lst_cnt ) area[id] -= pl_lst_vrl[idx]->area; else { for (jdx = 0; jdx < pl_cnt; jdx++) if (pl_lst[jdx]->src_id == id) break; if (jdx < pl_cnt) area[jdx] -= pl_lst_vrl[idx]->area; } } for(idx=0;idx<pl_cnt;idx++) { if (fabs(area[idx]) > epsilon) { if (nco_dbg_lvl_get() >= nco_dbg_dev) fprintf(stderr, "%s() src_id=%d area=%.15e\n", fnc_nm, pl_lst[idx]->src_id, area[idx]); pl_lst_dbg = (poly_sct **) nco_realloc(pl_lst_dbg, sizeof(poly_sct*) * (pl_nbr_dbg + 1)); pl_lst_dbg[pl_nbr_dbg] = nco_poly_dpl(pl_lst[idx]); pl_nbr_dbg++; } } area=(double*)nco_free(area); *pl_cnt_dbg=pl_nbr_dbg; return pl_lst_dbg; } wgt_sct ** nco_poly_lst_mk_dwe_sph( rgr_sct *const rgr_nfo, poly_sct **pl_lst_out, int pl_cnt, nco_grd_lon_typ_enm grd_lon_typ, KDTree **tree, int nbr_tr, int *wgt_cnt_bln_ret) { /* just duplicate output list to overlap */ const char fnc_nm[] = "nco_poly_lst_mk_dwe_sph()"; int thr_idx = 0; /* approx number of input cells each thread will process */ int thr_quota; /* reporting step */ int thr_quota_step; /* max number of nearest neighbours to consider - nr-reference from rgr_nfo */ int const max_nbr_dwe=20; int nbr_dwe=0; double pow_dwe=0.0; double min_dist=1.0e-12; double min_wgt=1.0e-20; poly_typ_enm pl_typ; size_t idx; int lcl_thr_nbr; omp_mem_sct *mem_lst = NULL_CEWI; wgt_sct **wgt_lst_dwe = NULL_CEWI; FILE *const fp_stderr = stderr; pl_typ = pl_lst_out[0]->pl_typ; lcl_thr_nbr = omp_get_max_threads(); nbr_dwe= ( 
rgr_nfo->xtr_nsp > max_nbr_dwe ? max_nbr_dwe : rgr_nfo->xtr_nsp ); pow_dwe=rgr_nfo->xtr_xpn; mem_lst = (omp_mem_sct *) nco_malloc(sizeof(omp_mem_sct) * lcl_thr_nbr); for (idx = 0; idx < lcl_thr_nbr; idx++) { mem_lst[idx].wgt_lst = NULL_CEWI; mem_lst[idx].blk_nbr = 0; mem_lst[idx].pl_cnt = 0; mem_lst[idx].kd_list = (KDPriority *) nco_calloc(sizeof(KDPriority), (size_t) (NCO_VRL_BLOCKSIZE)); mem_lst[idx].kd_cnt = 0; mem_lst[idx].kd_blk_nbr = 1; mem_lst[idx].idx_cnt = 0; } thr_quota = pl_cnt / lcl_thr_nbr; thr_quota_step = thr_quota / 20; if (thr_quota_step < 2000) thr_quota_step = 2000; /* NB: "OpenMP notes" section of nco_rgr.c has detailed discussion of these settings Henry, please keep the variables in alphabetical order within a clause and remember to update Intel */ #ifdef __GNUG__ # define GCC_LIB_VERSION ( __GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__ ) # if GCC_LIB_VERSION < 490 # define GXX_OLD_OPENMP_SHARED_TREATMENT 1 # endif /* 480 */ #endif /* !__GNUC__ */ #if defined(__INTEL_COMPILER) # pragma omp parallel for private(idx,thr_idx) schedule(dynamic,40) shared(grd_lon_typ,nbr_tr,pl_typ,tree) #else /* !__INTEL_COMPILER */ # ifdef GXX_OLD_OPENMP_SHARED_TREATMENT # pragma omp parallel for default(none) private(idx,thr_idx) shared(bDirtyRats,grd_lon_typ,max_nbr_vrl,nbr_tr,pl_cnt_dbg,pl_typ,tree,tot_nan_cnt,tot_wrp_cnt) # else /* !old g++ */ # pragma omp parallel for private(idx,thr_idx) schedule(dynamic,40) shared(grd_lon_typ,nbr_tr,pl_typ,tree) # endif /* !old g++ */ #endif /* !__INTEL_COMPILER */ for (idx = 0; idx < pl_cnt; idx++) { double dp_x_wrp; /* used to do a wrapped lon search */ double wgt_ttl=0.0; int nbr_dwe_cnt; /* equal to or less than nbr_nni */ wgt_sct wgt_pre[max_nbr_dwe]; wgt_sct * wgt_lcl=NULL_CEWI; poly_sct *pl=NULL_CEWI; size_t jdx; size_t kdx; size_t nbr_lst_lcl; // (void) nco_poly_set_priority(max_nbr_vrl, list); thr_idx = omp_get_thread_num(); if (0 && nco_dbg_lvl_get() >= nco_dbg_dev) fprintf(fp_stderr, "%s(): idx=%lu 
thr=%d\n", fnc_nm, idx, thr_idx); if (pl_lst_out[idx]->bmsk == False) continue; mem_lst[thr_idx].kd_cnt = 0; if (mem_lst[thr_idx].kd_blk_nbr > 1) { mem_lst[thr_idx].kd_blk_nbr = 1; mem_lst[thr_idx].kd_list = (KDPriority *) nco_free(mem_lst[thr_idx].kd_list); //mem_lst[thr_idx].kd_list=(KDPriority*)nco_realloc(mem_lst[idx].kd_list, sizeof(KDPriority) * NCO_VRL_BLOCKSIZE ); mem_lst[idx].kd_list = (KDPriority *) nco_calloc(sizeof(KDPriority), (size_t) (NCO_VRL_BLOCKSIZE)); } /* get bounds of polygon in */ //bSplit = nco_poly_minmax_split(pl_lst_out[idx], grd_lon_typ, size1, size2); dp_x_wrp=KD_DBL_MAX; for(kdx=0;kdx<nbr_tr;kdx++) kd_nearest(tree[kdx], pl_lst_out[idx]->dp_x_ctr, pl_lst_out[idx]->dp_y_ctr, pl_typ, nbr_dwe, mem_lst[thr_idx].kd_list + nbr_dwe *kdx ); nbr_lst_lcl=nbr_dwe*nbr_tr; switch(grd_lon_typ) { case nco_grd_lon_nil: case nco_grd_lon_unk: case nco_grd_lon_Grn_ctr: case nco_grd_lon_Grn_wst: case nco_grd_lon_bb: if(pl_lst_out[idx]->dp_x_ctr <180.0) dp_x_wrp=pl_lst_out[idx]->dp_x_ctr+360.0; else if(pl_lst_out[idx]->dp_x_ctr >180.0) dp_x_wrp=pl_lst_out[idx]->dp_x_ctr-360.0; break; case nco_grd_lon_180_wst: case nco_grd_lon_180_ctr: if(pl_lst_out[idx]->dp_x_ctr <0.0) dp_x_wrp=pl_lst_out[idx]->dp_x_ctr+360.0; else if(pl_lst_out[idx]->dp_x_ctr >0.0) dp_x_wrp=pl_lst_out[idx]->dp_x_ctr-360.0; break; } if(dp_x_wrp != KD_DBL_MAX) { for (kdx = 0; kdx < nbr_tr; kdx++) kd_nearest(tree[kdx], dp_x_wrp, pl_lst_out[idx]->dp_y_ctr, pl_typ, nbr_dwe, mem_lst[thr_idx].kd_list + nbr_lst_lcl+nbr_dwe * kdx); nbr_lst_lcl+=nbr_dwe*nbr_tr; } if(nbr_tr >1 ) qsort((void *)mem_lst[thr_idx].kd_list, nbr_lst_lcl, sizeof(KDPriority), kd_priority_cmp_dist); /* remember kd list sorted according to distance */ /* check first member distance if min then output singleton */ if(mem_lst[thr_idx].kd_list[0].dist <= min_dist) { pl=(poly_sct*)mem_lst[thr_idx].kd_list[0].elem->item; wgt_lcl=(wgt_sct*)nco_malloc(sizeof(wgt_sct)); wgt_lcl->src_id=pl->src_id; 
wgt_lcl->dst_id=pl_lst_out[idx]->src_id; wgt_lcl->area=pl->area; wgt_lcl->dist=mem_lst[thr_idx].kd_list[0].dist; wgt_lcl->wgt=1.0; if( mem_lst[thr_idx].blk_nbr * NCO_VRL_BLOCKSIZE < mem_lst[thr_idx].pl_cnt +1 ) mem_lst[thr_idx].wgt_lst= (wgt_sct**)nco_realloc(mem_lst[thr_idx].wgt_lst, sizeof(wgt_sct*) * ++mem_lst[thr_idx].blk_nbr * NCO_VRL_BLOCKSIZE ); mem_lst[thr_idx].wgt_lst[mem_lst[thr_idx].pl_cnt++] =wgt_lcl; if (nco_dbg_lvl_get() >= nco_dbg_dev) (void)fprintf(fp_stderr,"%s:%s: singleton x_ctr=%f y_ctr=%f\n", nco_prg_nm_get(), fnc_nm, pl_lst_out[idx]->dp_x_ctr, pl_lst_out[idx]->dp_y_ctr ); }else{ /* check for duplicates in first nbr_nni by sorting again with ->item !!!*/ kd_priority_list_sort(mem_lst[thr_idx].kd_list,nbr_dwe, nbr_dwe,&nbr_dwe_cnt ); if (nco_dbg_lvl_get() >= nco_dbg_dev && nbr_dwe_cnt < nbr_dwe ) (void)fprintf(fp_stderr,"%s:%s: nbr_nni_cnt=%d x_ctr=%f y_ctr=%f\n", nco_prg_nm_get(), fnc_nm, nbr_dwe_cnt, pl_lst_out[idx]->dp_x_ctr, pl_lst_out[idx]->dp_y_ctr ); /* output at least one */ for (jdx = 0; jdx < nbr_dwe_cnt; jdx++) { pl = (poly_sct *) mem_lst[thr_idx].kd_list[jdx].elem->item; /* remember src is in tree and dst is in list */ wgt_pre[jdx].src_id = pl->src_id; wgt_pre[jdx].dst_id = pl_lst_out[idx]->src_id; wgt_pre[jdx].area = pl->area; wgt_pre[jdx].dist = mem_lst[thr_idx].kd_list[jdx].dist; /* use dist squared */ wgt_pre[jdx].wgt = 1.0 / pow(wgt_pre[jdx].dist, pow_dwe); } /* find weights total */ for (jdx = 0; jdx < nbr_dwe_cnt; jdx++) wgt_ttl += wgt_pre[jdx].wgt; /* normalize weights */ for (jdx = 0; jdx < nbr_dwe_cnt; jdx++) wgt_pre[jdx].wgt /= wgt_ttl; for (jdx = 0; jdx < nbr_dwe_cnt; jdx++) { if (wgt_pre[jdx].wgt < min_wgt) continue; wgt_lcl = (wgt_sct *) nco_malloc(sizeof(wgt_sct)); *wgt_lcl = wgt_pre[jdx]; if (mem_lst[thr_idx].blk_nbr * NCO_VRL_BLOCKSIZE < mem_lst[thr_idx].pl_cnt + 1) mem_lst[thr_idx].wgt_lst = (wgt_sct **) nco_realloc(mem_lst[thr_idx].wgt_lst,sizeof(wgt_sct *) * ++mem_lst[thr_idx].blk_nbr *NCO_VRL_BLOCKSIZE); 
mem_lst[thr_idx].wgt_lst[mem_lst[thr_idx].pl_cnt++] = wgt_lcl; //(void)fprintf(stderr,"%s: weight(%lu)=%f ",fnc_nm, mem_lst[thr_idx].pl_cnt, wgt_lcl->wgt); } /* end jdx */ } /* output some usefull tracking stuff - not debug but informative */ if ( ++mem_lst[thr_idx].idx_cnt % thr_quota_step == 0 && nco_dbg_lvl_get() >=3 ) (void)fprintf(fp_stderr, "%s: thread %d has processed %2.2f%% (%ld) of src cells quota and output %ld overlap cells\n", nco_prg_nm_get(), thr_idx, (float)mem_lst[thr_idx].idx_cnt/(float)thr_quota *100.0, mem_lst[thr_idx].idx_cnt, mem_lst[thr_idx].pl_cnt ); } /* end idx */ nco_mem_lst_cat(mem_lst, lcl_thr_nbr); /* free up kd_list's */ for(idx=0;idx<lcl_thr_nbr;idx++) mem_lst[idx].kd_list= (KDPriority*) nco_free(mem_lst[idx].kd_list); wgt_lst_dwe=mem_lst[0].wgt_lst; *wgt_cnt_bln_ret=mem_lst[0].pl_cnt; mem_lst=(omp_mem_sct*)nco_free(mem_lst); return wgt_lst_dwe; } /* !nco_poly_lst_mk_dwe_sph() */ void nco_poly_lst_ctr_add( poly_sct **pl_lst, int pl_cnt, int ctr_typ) { int idx; double pControl[NBR_SPH]; for(idx=0;idx<pl_cnt;idx++) { if(pl_lst[idx]->crn_nbr <3 || pl_lst[idx]->area==0.0 ) continue; if(ctr_typ==1){ nco_sph_inside_mk(pl_lst[idx], pControl); pl_lst[idx]->dp_x_ctr=R2D(pControl[3]); pl_lst[idx]->dp_y_ctr=R2D(pControl[4]); } } return; } /* !nco_poly_lst_ctr_add() */ void nco_mem_lst_cat( omp_mem_sct *mem_lst, int sz_lst) { int idx; int i_typ; size_t tot_cnt = 0; if(mem_lst->wgt_lst) i_typ = 1; else if(mem_lst->pl_lst) i_typ = 2; else i_typ=0; /* quick return if list empty */ if(!i_typ) return; /* find total size of lists */ for (idx = 0; idx < sz_lst; idx++) tot_cnt += mem_lst[idx].pl_cnt; if(!tot_cnt) return; else if( i_typ==1 ){ wgt_sct **tmp_wgt_lst = NULL; tmp_wgt_lst = mem_lst[0].wgt_lst = (wgt_sct **) nco_realloc(mem_lst[0].wgt_lst, sizeof(wgt_sct *) * tot_cnt); tmp_wgt_lst += mem_lst[0].pl_cnt; for (idx = 1; idx < sz_lst; idx++) { if (mem_lst[idx].wgt_lst) { memcpy(tmp_wgt_lst, mem_lst[idx].wgt_lst, sizeof(wgt_sct *) * 
mem_lst[idx].pl_cnt); tmp_wgt_lst += mem_lst[idx].pl_cnt; /* free up list */ mem_lst[idx].wgt_lst = (wgt_sct **) nco_free(mem_lst[idx].wgt_lst); } } } else if(i_typ==2) { poly_sct **tmp_ply_lst = NULL; tmp_ply_lst = mem_lst[0].pl_lst = (poly_sct **) nco_realloc(mem_lst[0].pl_lst, sizeof(poly_sct *) * tot_cnt); tmp_ply_lst += mem_lst[0].pl_cnt; for (idx = 1; idx < sz_lst; idx++) { if (mem_lst[idx].pl_lst) { memcpy(tmp_ply_lst, mem_lst[idx].pl_lst, sizeof(poly_sct *) * mem_lst[idx].pl_cnt); tmp_ply_lst += mem_lst[idx].pl_cnt; /* free up list */ mem_lst[idx].pl_lst = (poly_sct **) nco_free(mem_lst[idx].pl_lst); } } } /* update first list with total */ mem_lst[0].pl_cnt=tot_cnt; return; } /* !nco_mem_lst_cat() */
GB_binop__land_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__land_fp32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__land_fp32) // A.*B function (eWiseMult): GB (_AemultB_03__land_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__land_fp32) // A*D function (colscale): GB (_AxD__land_fp32) // D*A function (rowscale): GB (_DxB__land_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__land_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__land_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_fp32) // C=scalar+B GB (_bind1st__land_fp32) // C=scalar+B' GB (_bind1st_tran__land_fp32) // C=A+scalar GB (_bind2nd__land_fp32) // C=A'+scalar GB (_bind2nd_tran__land_fp32) // C type: float // A type: float // B,b type: float // BinaryOp: cij = ((aij != 0) && (bij != 0)) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] 
#define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ((x != 0) && (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LAND || GxB_NO_FP32 || GxB_NO_LAND_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__land_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__land_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__land_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__land_fp32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__land_fp32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__land_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__land_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t 
*restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__land_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__land_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__land_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__land_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; 
p < anz ; p++) { if (!GBB (Bb, p)) continue ; float bij = Bx [p] ; Cx [p] = ((x != 0) && (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__land_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = Ax [p] ; Cx [p] = ((aij != 0) && (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = ((x != 0) && (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__land_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = ((aij != 0) && (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__land_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unop__expm1_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__expm1_fp64_fp64 // op(A') function: GB_unop_tran__expm1_fp64_fp64 // C type: double // A type: double // cast: double cij = aij // unaryop: cij = expm1 (aij) #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = expm1 (x) ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = expm1 (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EXPM1 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__expm1_fp64_fp64 ( double *Cx, // Cx and Ax may be aliased const double *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = aij ; Cx [p] = expm1 (z) ; } return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__expm1_fp64_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
p_index.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>

/* Below this many elements, use the quadratic in-place sort instead of
   spawning more tasks (task overhead would dominate). */
#define THRESHOLD 8

/*
 * Merge two sorted, contiguous index runs [l1,h1) and [l2,h2) into buf,
 * ordering indices by the values they point at in base, then copy the
 * merged run back over the first run.  The '<' comparison keeps the left
 * run's element on ties, so the merge is stable.
 */
static void p_merge(unsigned int* base, unsigned int* l1, unsigned int* h1,
                    unsigned int* l2, unsigned int* h2, unsigned int* buf)
{
    unsigned int* i = l1;
    unsigned int* j = l2;
    unsigned int* k = buf;

    while (i != h1 && j != h2) {
        if (base[*j] < base[*i])
            *k++ = *j++;            /* plain assignment; memcpy per element was overkill */
        else
            *k++ = *i++;
    }
    while (i != h1)
        *k++ = *i++;
    while (j != h2)
        *k++ = *j++;

    memcpy(l1, buf, (size_t)((h1 - l1) + (h2 - l2)) * sizeof(unsigned int));
}

/* Quadratic (selection-style) sort of the index range [l,h) by base values;
   used for runs of THRESHOLD elements or fewer.  (The unused shadowed
   variable 't' from the original has been removed.) */
static void p_sort_(unsigned int* base, unsigned int* l, unsigned int* h)
{
    unsigned int* i;
    for (i = l; i < h; i++) {
        unsigned int* j;
        for (j = i + 1; j < h; j++) {
            if (base[*i] > base[*j]) {
                unsigned int t = *i;
                *i = *j;
                *j = t;
            }
        }
    }
}

/* Recursive parallel mergesort over the index range [l,h); p_ is a scratch
   buffer of the same length, used by the merge step. */
static void p_sort(unsigned int* base, unsigned int* l, unsigned int* h,
                   unsigned int* p_)
{
    if (h - l <= THRESHOLD) {
        p_sort_(base, l, h);
    } else {
        unsigned int* m  = l + (h - l) / 2;
        unsigned int* m_ = p_ + (m - l);
        /* Sort the two halves concurrently, then merge them in place. */
        #pragma omp task
        p_sort(base, l, m, p_);
        p_sort(base, m, h, m_);
        #pragma omp taskwait
        p_merge(base, l, m, m, h, p_);
    }
}

/*
 * Build a sorting permutation of buf: after the call,
 * buf[p[0]] <= buf[p[1]] <= ... <= buf[p[sz-1]].
 *
 * p must have room for sz entries; buf is not modified.
 */
void p_init(size_t sz, unsigned int* buf, unsigned int* p)
{
    /* Scratch buffer for the merge phase.  The original used
       _mm_malloc(.., 64) without including the header that declares it
       (implicit declaration, invalid C11); the 64-byte alignment was a
       cache-performance detail only, so portable malloc behaves the same. */
    unsigned int* p_ = malloc(sz * sizeof *p_);
    if (p_ == NULL) {
        fprintf(stderr, "p_init: out of memory\n");
        exit(EXIT_FAILURE);
    }

    /* size_t counter: the original's 'unsigned int i' could not cover a
       full size_t range. */
    for (size_t i = 0; i < sz; i++)
        p[i] = (unsigned int)i;

    #pragma omp parallel
    {
        #pragma omp single
        {
            p_sort(buf, p, p + sz, p_);
        }
    }

    free(p_);
}
add_mp.c
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>

/*
 * Small OpenMP demo: fill a[1..9] so that a[i] == 2^i and report which
 * thread handled each iteration.
 *
 * Fixes over the original:
 *  - main's second parameter was `char argv[]`, which is not a valid
 *    signature for main; it must be `char *argv[]`.
 *  - omp_rank / omp_threads were shared across all threads, a data race;
 *    they are now block-scoped inside the loop, hence private.
 *  - `a[i] = 2 * a[i-1]` is a loop-carried dependence, so running it under
 *    `omp for` produced nondeterministic results.  Each iteration now
 *    computes its value independently via the closed form of the
 *    recurrence with a[0] = 1:  a[i] = 1 << i.
 *  - added the missing return statement.
 */
int main(int argc, char *argv[])
{
    (void)argc;
    (void)argv;

    int a[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
    const int n = 10;
    int i;

    #pragma omp parallel for shared(a)
    for (i = 1; i < n; i++) {
        int omp_rank = omp_get_thread_num();     /* block-scoped => private */
        int omp_threads = omp_get_num_threads();
        a[i] = 1 << i;  /* independent form of a[i] = 2 * a[i-1], a[0] = 1 */
        printf("a[%d] = %d : %d of threads %d\n",
               i, a[i], omp_rank, omp_threads);
    }

    return 0;
}
critical.c
/* Copyright (C) 2005, 2009, 2011 Free Software Foundation, Inc. Contributed by Richard Henderson <rth@redhat.com>. This file is part of the GNU OpenMP Library (libgomp). Libgomp is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ /* This file handles the CRITICAL construct. */ #include "libgomp.h" #include <stdlib.h> static gomp_mutex_t default_lock; void GOMP_critical_start (void) { /* There is an implicit flush on entry to a critical region. */ __atomic_thread_fence (MEMMODEL_RELEASE); gomp_mutex_lock (&default_lock); } void GOMP_critical_end (void) { gomp_mutex_unlock (&default_lock); } #ifndef HAVE_SYNC_BUILTINS static gomp_mutex_t create_lock_lock; #endif void GOMP_critical_name_start (void **pptr) { gomp_mutex_t *plock; /* If a mutex fits within the space for a pointer, and is zero initialized, then use the pointer space directly. */ if (GOMP_MUTEX_INIT_0 && sizeof (gomp_mutex_t) <= sizeof (void *) && __alignof (gomp_mutex_t) <= sizeof (void *)) plock = (gomp_mutex_t *)pptr; /* Otherwise we have to be prepared to malloc storage. 
*/ else { plock = *pptr; if (plock == NULL) { #ifdef HAVE_SYNC_BUILTINS gomp_mutex_t *nlock = gomp_malloc (sizeof (gomp_mutex_t)); gomp_mutex_init (nlock); plock = __sync_val_compare_and_swap (pptr, NULL, nlock); if (plock != NULL) { gomp_mutex_destroy (nlock); free (nlock); } else plock = nlock; #else gomp_mutex_lock (&create_lock_lock); plock = *pptr; if (plock == NULL) { plock = gomp_malloc (sizeof (gomp_mutex_t)); gomp_mutex_init (plock); __sync_synchronize (); *pptr = plock; } gomp_mutex_unlock (&create_lock_lock); #endif } } gomp_mutex_lock (plock); } void GOMP_critical_name_end (void **pptr) { gomp_mutex_t *plock; /* If a mutex fits within the space for a pointer, and is zero initialized, then use the pointer space directly. */ if (GOMP_MUTEX_INIT_0 && sizeof (gomp_mutex_t) <= sizeof (void *) && __alignof (gomp_mutex_t) <= sizeof (void *)) plock = (gomp_mutex_t *)pptr; else plock = *pptr; gomp_mutex_unlock (plock); } /* This mutex is used when atomic operations don't exist for the target in the mode requested. The result is not globally atomic, but works so long as all parallel references are within #pragma omp atomic directives. According to responses received from omp@openmp.org, appears to be within spec. Which makes sense, since that's how several other compilers handle this situation as well. */ static gomp_mutex_t atomic_lock; void GOMP_atomic_start (void) { gomp_mutex_lock (&atomic_lock); } void GOMP_atomic_end (void) { gomp_mutex_unlock (&atomic_lock); } #if !GOMP_MUTEX_INIT_0 static void __attribute__((constructor)) initialize_critical (void) { gomp_mutex_init (&default_lock); gomp_mutex_init (&atomic_lock); #ifndef HAVE_SYNC_BUILTINS gomp_mutex_init (&create_lock_lock); #endif } #endif
convolution_pack8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convolution_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; // kernel offsets std::vector<int> _space_ofs(maxk); int* space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } const __fp16* bias_data_ptr = bias_data_fp16; // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float16x8_t _sum = vdupq_n_f16((__fp16)0.f); if (bias_data_ptr) { _sum = vld1q_f16(bias_data_ptr + p * 8); } const __fp16* kptr = weight_data_fp16.channel(p); // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const __fp16* sptr = m.row<const 
__fp16>(i * stride_h) + j * stride_w * 8; for (int k = 0; k < maxk; k++) { float16x8_t _val = vld1q_f16(sptr + space_ofs[k] * 8); float16x8_t _w0 = vld1q_f16(kptr); float16x8_t _w1 = vld1q_f16(kptr + 8); float16x8_t _w2 = vld1q_f16(kptr + 16); float16x8_t _w3 = vld1q_f16(kptr + 24); float16x8_t _w4 = vld1q_f16(kptr + 32); float16x8_t _w5 = vld1q_f16(kptr + 40); float16x8_t _w6 = vld1q_f16(kptr + 48); float16x8_t _w7 = vld1q_f16(kptr + 56); _sum = vfmaq_laneq_f16(_sum, _w0, _val, 0); _sum = vfmaq_laneq_f16(_sum, _w1, _val, 1); _sum = vfmaq_laneq_f16(_sum, _w2, _val, 2); _sum = vfmaq_laneq_f16(_sum, _w3, _val, 3); _sum = vfmaq_laneq_f16(_sum, _w4, _val, 4); _sum = vfmaq_laneq_f16(_sum, _w5, _val, 5); _sum = vfmaq_laneq_f16(_sum, _w6, _val, 6); _sum = vfmaq_laneq_f16(_sum, _w7, _val, 7); kptr += 64; } } _sum = activation_ps(_sum, activation_type, activation_params); vst1q_f16(outptr + j * 8, _sum); } outptr += outw * 8; } } }
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 16; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
ecryptfs_fmt_plug.c
/* Cracker for eCryptfs ~/.ecryptfs/wrapped-passphrase. * * We attack "login passphrase" instead of "mount passphrase" (and which could * be 128-bit random key!). * * "ecryptfs_unwrap_passphrase -> generate_passphrase_sig" in * src/libecryptfs/key_management.c is important. * * Do we need to do full decryption as done in "ecryptfs_unwrap_passphrase"? * I believe, 8 bytes of verification data ought to be enough for anybody! * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> * and it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_ecryptfs1; #elif FMT_REGISTERS_H john_register_one(&fmt_ecryptfs1); #else #include <string.h> #include <errno.h> #include "sha2.h" #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "base64_convert.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #define OMP_SCALE 8 // XXX #endif #include "memdbg.h" #define FORMAT_TAG "$ecryptfs$" #define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define FORMAT_LABEL "eCryptfs" #define FORMAT_NAME "" #define ALGORITHM_NAME "65536x SHA-512" // good luck with that! 
#define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define REAL_BINARY_SIZE 8 #define HEX_BINARY_SIZE (REAL_BINARY_SIZE*2) #define BINARY_SIZE 64 #define BINARY_ALIGN 4 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 4 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 /* taken from eCryptfs */ #define ECRYPTFS_DEFAULT_NUM_HASH_ITERATIONS 65536 #define ECRYPTFS_MAX_PASSWORD_LENGTH 64 #define ECRYPTFS_MAX_PASSPHRASE_BYTES ECRYPTFS_MAX_PASSWORD_LENGTH #define ECRYPTFS_SALT_SIZE 8 #define ECRYPTFS_SALT_SIZE_HEX (ECRYPTFS_SALT_SIZE*2) #define ECRYPTFS_DEFAULT_SALT "\x00\x11\x22\x33\x44\x55\x66\x77" #define ECRYPTFS_DEFAULT_SALT_HEX "0011223344556677" #define ECRYPTFS_DEFAULT_SALT_FNEK_HEX "9988776655443322" #define ECRYPTFS_SIG_SIZE 8 #define ECRYPTFS_SIG_SIZE_HEX (ECRYPTFS_SIG_SIZE*2) #define ECRYPTFS_PASSWORD_SIG_SIZE ECRYPTFS_SIG_SIZE_HEX #define ECRYPTFS_MAX_KEY_BYTES 64 #define ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES 512 #define ECRYPTFS_DEFAULT_IV_BYTES 16 static struct fmt_tests ecryptfs_tests[] = { /* hash ==> first 16 bytes of ~/.ecryptfs/wrapped-passphrase */ {"$ecryptfs$0$92dc3db8feaf1676", "openwall"}, {"$ecryptfs$0$ccb515ee115be591", "failpassword"}, {"$ecryptfs$0$8acb10b9e061fcc7", "verylongbutstillfailpassword"}, /* fake hash to test custom salt handling */ {"$ecryptfs$0$1$0000000000000000$884ed410cd143bca", "fake"}, {"$ecryptfs$0$1$544c39674737716a$a8307a01b2d1b008", "fake"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static struct custom_salt { int iterations; // really really unused (even in the original code) int salt_length; char unsigned salt[ECRYPTFS_SALT_SIZE + 1]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = 
mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static int valid(char *ciphertext, struct fmt_main *self) { char *p; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LENGTH) != 0) return 0; p = ciphertext + FORMAT_TAG_LENGTH; if (*p != '0' || *(p + 1) != '$') return 0; p += 2; if (*p == '1' || *(p + 1) == '$') { // handle salted variety p += 2; if (base64_valid_length(p, e_b64_hex, flg_Base64_NO_FLAGS) != HEX_BINARY_SIZE || p[HEX_BINARY_SIZE] != '$') return 0; p += (HEX_BINARY_SIZE+1); } return base64_valid_length(p, e_b64_hex, flg_Base64_NO_FLAGS) == HEX_BINARY_SIZE; } static void *get_salt(char *ciphertext) { static struct custom_salt cs; int i; char *p, *q; memset(&cs, 0, SALT_SIZE); p = ciphertext + FORMAT_TAG_LENGTH; p = p + 2; // skip over "0$" /* support for custom salt */ if (*p == '1' && *(p + 1) == '$') { p = p + 2; q = strchr(p, '$'); cs.salt_length = (q - p) / 2; for (i = 0; i < cs.salt_length; i++) cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; } else { memcpy(cs.salt, ECRYPTFS_DEFAULT_SALT, ECRYPTFS_SALT_SIZE); } return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[REAL_BINARY_SIZE]; ARCH_WORD_32 dummy; } buf; unsigned char *out = buf.c; int i; char *p = strrchr(ciphertext, '$') + 1; for (i = 0; i < REAL_BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; } static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; } static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; } static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; } static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; } static int get_hash_5(int index) { return crypt_out[index][0] & 
0xffffff; } static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { int j; SHA512_CTX ctx; SHA512_Init(&ctx); SHA512_Update(&ctx, cur_salt->salt, ECRYPTFS_SALT_SIZE); SHA512_Update(&ctx, saved_key[index], strlen(saved_key[index])); SHA512_Final((unsigned char *)crypt_out[index], &ctx); /* now "h" (crypt_out[index] becomes our input, total SHA-512 calls => 65536 */ for (j = 1; j <= ECRYPTFS_DEFAULT_NUM_HASH_ITERATIONS; j++) { SHA512_CTX ctx; SHA512_Init(&ctx); SHA512_Update(&ctx, (unsigned char*)crypt_out[index], BINARY_SIZE); SHA512_Final((unsigned char *)crypt_out[index], &ctx); } } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], REAL_BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], REAL_BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void ecryptfs_set_key(char *key, int index) { int saved_key_length = strlen(key); if (saved_key_length > PLAINTEXT_LENGTH) saved_key_length = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_key_length); saved_key[index][saved_key_length] = 0; } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_ecryptfs1 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, REAL_BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { NULL }, #endif ecryptfs_tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, 
get_salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, set_salt, ecryptfs_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
luo_rudy_1991.c
#include "luo_rudy_1991.h"

/*
 * Luo-Rudy 1991 ventricular action-potential model, CPU solver.
 * State vector sv[] holds NEQ (8) variables: V, m, h, j, d, f, X, Cai.
 */

/* Report the model's resting potential and equation count to the framework. */
GET_CELL_MODEL_DATA(init_cell_model_data) {

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

/* Set the steady-state initial conditions for one cell's state vector. */
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    sv[0] = -84.380111f; //V millivolt
    sv[1] = 0.001713f;   //m dimensionless
    sv[2] = 0.982661f;   //h dimensionless
    sv[3] = 0.989108f;   //j dimensionless
    sv[4] = 0.003021f;   //d dimensionless
    sv[5] = 0.999968f;   //f dimensionless
    sv[6] = 0.041760f;   //X dimensionless
    sv[7] = 0.000179f;   //Cai millimolar
}

/*
 * Advance every requested cell by num_steps explicit time steps of size dt.
 * cells_to_solve (when non-NULL) maps the loop index to the cell id;
 * otherwise cells are solved in order. Cells are independent, hence the
 * OpenMP parallel for.
 */
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {

        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

/* One forward-Euler step: sv += dt * RHS(sv). */
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current);

    for(int i = 0; i < NEQ; i++)
        sv[i] = dt*rDY[i] + rY[i];
}

/* Piecewise rate expressions; each macro assigns one gating rate depending on
   a voltage threshold (auto-generated from the CellML model description). */
#define IFNUMBER_1(name)if((V_old_<(-4.000000000000000e+01f))) { (name) = (1.350000000000000e-01f*expf(((8.000000000000000e+01f+V_old_)/(-6.800000000000000e+00f)))); } else{ (name) = 0.000000000000000e+00f; }
#define IFNUMBER_2(name)if((V_old_<(-4.000000000000000e+01f))) { (name) = ((3.560000000000000e+00f*expf((7.900000000000000e-02f*V_old_)))+(3.100000000000000e+05f*expf((3.500000000000000e-01f*V_old_)))); } else{ (name) = (1.000000000000000e+00f/(1.300000000000000e-01f*(1.000000000000000e+00f+expf(((V_old_+1.066000000000000e+01f)/(-1.110000000000000e+01f)))))); }
#define IFNUMBER_3(name)if((V_old_<(-4.000000000000000e+01f))) { (name) = (((((-1.271400000000000e+05f)*expf((2.444000000000000e-01f*V_old_)))-(3.474000000000000e-05f*expf(((-4.391000000000000e-02f)*V_old_))))*(V_old_+3.778000000000000e+01f))/(1.000000000000000e+00f+expf((3.110000000000000e-01f*(V_old_+7.923000000000000e+01f))))); } else{ (name) = 0.000000000000000e+00f; }
#define IFNUMBER_4(name)if((V_old_<(-4.000000000000000e+01f))) { (name) = ((1.212000000000000e-01f*expf(((-1.052000000000000e-02f)*V_old_)))/(1.000000000000000e+00f+expf(((-1.378000000000000e-01f)*(V_old_+4.014000000000000e+01f))))); } else{ (name) = ((3.000000000000000e-01f*expf(((-2.535000000000000e-07f)*V_old_)))/(1.000000000000000e+00f+expf(((-1.000000000000000e-01f)*(V_old_+3.200000000000000e+01f))))); }
#define IFNUMBER_5(name)if((V_old_>(-1.000000000000000e+02f))) { (name) = ((2.837000000000000e+00f*(expf((4.000000000000000e-02f*(V_old_+7.700000000000000e+01f)))-1.000000000000000e+00f))/((V_old_+7.700000000000000e+01f)*expf((4.000000000000000e-02f*(V_old_+3.500000000000000e+01f))))); } else{ (name) = 1.000000000000000e+00f; }

/*
 * Right-hand side of the Luo-Rudy 1991 ODE system: given the current state
 * sv and the stimulus current, write d(state)/dt into rDY_.
 * The trailing //N comments are equation numbers from the generating model.
 */
void RHS_cpu(const real *sv, real *rDY_, real stim_current) {

    //State variables
    const real V_old_ = sv[0];
    const real m_old_ = sv[1];
    const real h_old_ = sv[2];
    const real j_old_ = sv[3];
    const real d_old_ = sv[4];
    const real f_old_ = sv[5];
    const real X_old_ = sv[6];
    const real Cai_old_ = sv[7];

    //Parameters (membrane capacitance, gas constant, temperature, Faraday
    //constant, ionic concentrations and channel conductances)
    const real C = 1.000000000000000e+00f;
    const real R = 8.314000000000000e+03f;
    const real T = 3.100000000000000e+02f;
    const real F = 9.648460000000001e+04f;
    const real Nao = 1.400000000000000e+02f;
    const real Nai = 1.800000000000000e+01f;
    const real g_Na = 2.300000000000000e+01f;
    const real Ko = 5.400000000000000e+00f;
    const real PR_NaK = 1.833000000000000e-02f;
    const real Ki = 1.450000000000000e+02f;
    const real g_Kp = 1.830000000000000e-02f;
    const real g_b = 3.921000000000000e-02f;
    const real E_b = -5.987000000000000e+01f;

    real calc_I_stim = stim_current;

    //Reversal potentials and gating-rate expressions
    real calc_E_Na = (((R*T)/F)*logf((Nao/Nai)));	//2
    real calc_alpha_m = ((3.200000000000000e-01f*(V_old_+4.713000000000000e+01f))/(1.000000000000000e+00f-expf(((-1.000000000000000e-01f)*(V_old_+4.713000000000000e+01f)))));	//4
    real calc_beta_m = (8.000000000000000e-02f*expf(((-V_old_)/1.100000000000000e+01f)));	//5
    real calc_alpha_h = 0.0f;
    IFNUMBER_1(calc_alpha_h);	//7
    real calc_beta_h = 0.0f;
    IFNUMBER_2(calc_beta_h);	//8
    real calc_alpha_j = 0.0f;
    IFNUMBER_3(calc_alpha_j);	//10
    real calc_beta_j = 0.0f;
    IFNUMBER_4(calc_beta_j);	//11
    real calc_E_si = (7.700000000000000e+00f-(1.302870000000000e+01f*logf((Cai_old_/1.000000000000000e+00f))));	//13
    real calc_alpha_d = ((9.500000000000000e-02f*expf(((-1.000000000000000e-02f)*(V_old_-5.000000000000000e+00f))))/(1.000000000000000e+00f+expf(((-7.199999999999999e-02f)*(V_old_-5.000000000000000e+00f)))));	//15
    real calc_beta_d = ((7.000000000000001e-02f*expf(((-1.700000000000000e-02f)*(V_old_+4.400000000000000e+01f))))/(1.000000000000000e+00f+expf((5.000000000000000e-02f*(V_old_+4.400000000000000e+01f)))));	//16
    real calc_alpha_f = ((1.200000000000000e-02f*expf(((-8.000000000000000e-03f)*(V_old_+2.800000000000000e+01f))))/(1.000000000000000e+00f+expf((1.500000000000000e-01f*(V_old_+2.800000000000000e+01f)))));	//18
    real calc_beta_f = ((6.500000000000000e-03f*expf(((-2.000000000000000e-02f)*(V_old_+3.000000000000000e+01f))))/(1.000000000000000e+00f+expf(((-2.000000000000000e-01f)*(V_old_+3.000000000000000e+01f)))));	//19
    real calc_g_K = (2.820000000000000e-01f*powf((Ko/5.400000000000000e+00f),0.5f));	//21
    real calc_E_K = (((R*T)/F)*logf(((Ko+(PR_NaK*Nao))/(Ki+(PR_NaK*Nai)))));	//22
    real calc_alpha_X = ((5.000000000000000e-04f*expf((8.300000000000000e-02f*(V_old_+5.000000000000000e+01f))))/(1.000000000000000e+00f+expf((5.700000000000000e-02f*(V_old_+5.000000000000000e+01f)))));	//24
    real calc_beta_X = ((1.300000000000000e-03f*expf(((-6.000000000000000e-02f)*(V_old_+2.000000000000000e+01f))))/(1.000000000000000e+00f+expf(((-4.000000000000000e-02f)*(V_old_+2.000000000000000e+01f)))));	//25
    real calc_Xi = 0.0f;
    IFNUMBER_5(calc_Xi);	//27
    real calc_g_K1 = (6.047000000000000e-01f*powf((Ko/5.400000000000000e+00f),0.5f));	//28
    real calc_E_K1 = (((R*T)/F)*logf((Ko/Ki)));	//29
    real calc_Kp = (1.000000000000000e+00f/(1.000000000000000e+00f+expf(((7.488000000000000e+00f-V_old_)/5.980000000000000e+00f))));	//35

    //Membrane currents
    real calc_i_b = (g_b*(V_old_-E_b));	//37
    real calc_i_Na = (g_Na*powf(m_old_,3.000000000000000e+00f)*h_old_*j_old_*(V_old_-calc_E_Na));	//3
    real calc_i_si = (9.000000000000000e-02f*d_old_*f_old_*(V_old_-calc_E_si));	//14
    real calc_alpha_K1 = (1.020000000000000e+00f/(1.000000000000000e+00f+expf((2.385000000000000e-01f*((V_old_-calc_E_K1)-5.921500000000000e+01f)))));	//31
    real calc_beta_K1 = (((4.912400000000000e-01f*expf((8.032000000000000e-02f*((V_old_+5.476000000000000e+00f)-calc_E_K1))))+(1.000000000000000e+00f*expf((6.175000000000000e-02f*(V_old_-(calc_E_K1+5.943099999999999e+02f))))))/(1.000000000000000e+00f+expf(((-5.143000000000000e-01f)*((V_old_-calc_E_K1)+4.753000000000000e+00f)))));	//32
    real calc_E_Kp = calc_E_K1;	//34
    real calc_i_K = (calc_g_K*X_old_*calc_Xi*(V_old_-calc_E_K));	//23
    real calc_K1_infinity = (calc_alpha_K1/(calc_alpha_K1+calc_beta_K1));	//33
    real calc_i_Kp = (g_Kp*calc_Kp*(V_old_-calc_E_Kp));	//36
    real calc_i_K1 = (calc_g_K1*calc_K1_infinity*(V_old_-calc_E_K1));	//30

    //Time derivatives: dV/dt from the total membrane current, then the seven
    //gating/concentration equations of the form alpha*(1-y) - beta*y.
    rDY_[0] = (((-1.000000000000000e+00f)/C)*(calc_I_stim+calc_i_Na+calc_i_si+calc_i_K+calc_i_K1+calc_i_Kp+calc_i_b));
    rDY_[1] = ((calc_alpha_m*(1.000000000000000e+00f-m_old_))-(calc_beta_m*m_old_));
    rDY_[2] = ((calc_alpha_h*(1.000000000000000e+00f-h_old_))-(calc_beta_h*h_old_));
    rDY_[3] = ((calc_alpha_j*(1.000000000000000e+00f-j_old_))-(calc_beta_j*j_old_));
    rDY_[4] = ((calc_alpha_d*(1.000000000000000e+00f-d_old_))-(calc_beta_d*d_old_));
    rDY_[5] = ((calc_alpha_f*(1.000000000000000e+00f-f_old_))-(calc_beta_f*f_old_));
    rDY_[6] = ((calc_alpha_X*(1.000000000000000e+00f-X_old_))-(calc_beta_X*X_old_));
    rDY_[7] = ((((-1.000000000000000e-04f)/1.000000000000000e+00f)*calc_i_si)+(7.000000000000001e-02f*(1.000000000000000e-04f-Cai_old_)));
}
taskloop_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -verify %s
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -triple x86_64-unknown-unknown -verify %s

// NOTE: This is a clang -verify test: the expected-error/warning/note comments
// are functional directives whose @+N offsets are relative line numbers, so
// their placement relative to the code below must not be disturbed.

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp taskloop simd'}}
#pragma omp taskloop simd

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp taskloop simd'}}
#pragma omp taskloop simd foo

// Directive with no clauses; also checks the "must be a for loop" diagnostic.
void test_no_clause() {
  int i;
#pragma omp taskloop simd
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{statement after '#pragma omp taskloop simd' must be a for loop}}
#pragma omp taskloop simd
  ++i;
}

// Branches into/out of the protected OpenMP region must be rejected.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp parallel
#pragma omp taskloop simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

// Unknown clauses are ignored with a warning; duplicate 'nogroup' is an error.
void test_invalid_clause() {
  int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}}
#pragma omp taskloop simd foo bar
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{directive '#pragma omp taskloop simd' cannot contain more than one 'nogroup' clause}}
#pragma omp taskloop simd nogroup nogroup
  for (i = 0; i < 16; ++i)
    ;
}

// Stray punctuation after the directive name or clauses is ignored + warned.
void test_non_identifiers() {
  int i, x;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}}
#pragma omp taskloop simd;
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}}
#pragma omp parallel
#pragma omp taskloop simd linear(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}}
#pragma omp taskloop simd private(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}}
#pragma omp taskloop simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}

extern int foo();

// Malformed and out-of-range 'collapse' clause arguments.
void test_collapse() {
  int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp taskloop simd collapse
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop simd collapse(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd collapse()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp taskloop simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp taskloop simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
#pragma omp taskloop simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp taskloop simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp taskloop simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp taskloop simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp taskloop simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp taskloop simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}

// Malformed 'private' clause argument lists, then valid uses.
void test_private() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop simd private(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd private(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd private(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd private()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd private(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp taskloop simd private(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp taskloop simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp taskloop simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp taskloop simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}

// Malformed 'lastprivate' clause argument lists, then valid uses.
void test_lastprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp taskloop simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

// Malformed 'firstprivate' lists, valid combined uses, and simdlen/safelen.
void test_firstprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop simd firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd firstprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop simd firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp taskloop simd firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp taskloop simd lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp taskloop simd simdlen(64) safelen(8)
  for (i = 0; i < 16; ++i)
    ;
}

// Loop iteration variable type restrictions.
void test_loop_messages() {
  float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp taskloop simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp taskloop simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }

// expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp taskloop simd
  for (__int128 ii = 0; ii < 10; ii++) {
    c[ii] = a[ii] + b[ii];
  }
}
ntlmv1_mschapv2_fmt_plug.c
/* * Previous files MSCHAPv2_fmt_plug.c and NETNTLM_fmt_plug.c now merged into * this one file, sharing functions. * * NETNTLM_fmt.c -- NTLM Challenge/Response * Written by JoMo-Kun <jmk at foofus.net> in 2007 * and placed in the public domain. * * This algorithm is designed for performing brute-force cracking of the NTLM * (version 1) challenge/response pairs exchanged during network-based * authentication attempts [1]. The captured challenge/response pairs from these * attempts should be stored using the L0phtCrack 2.0 LC format, specifically: * username:unused:unused:lm response:ntlm response:challenge. For example: * * CORP\Administrator:::25B2B477CE101D83648BB087CE7A1C217F51C7FC64C0EBB1: * C8BD0C1630A9ECF7A95F494A8F0B2CB4A3F25B1225514304:1122334455667788 * * It should be noted that a NTLM authentication response is not same as a NTLM * password hash, which can be extracted using tools such as FgDump [2]. NTLM * responses can be gathered via normal network capture or via tools which * perform layer 2 attacks, such as Ettercap [3] and Cain [4]. The responses can * also be harvested using a modified Samba service [5] in conjunction with * some trickery to convince the user to connect to it. I leave what that * trickery may actually be as an exercise for the reader (HINT: Karma, NMB * broadcasts, IE, Outlook, social engineering, ...). * * [1] http://davenport.sourceforge.net/ntlm.html#theNtlmResponse * [2] http://www.foofus.net/~fizzgig/fgdump/ * [3] http://ettercap.sourceforge.net/ * [4] http://www.oxid.it/cain.html * [5] http://www.foofus.net/jmk/smbchallenge.html * * This version supports Extended Session Security. This is what * is used when the "LM" hash ends in 32 zeros: * * DOMAIN\User:::c70e4fb229437ef300000000000000000000000000000000: * abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9:24ca92fdab441aa4 * * MSCHAPv2_fmt.c -- Microsoft PPP CHAP Extensions, Version 2 * Written by JoMo-Kun <jmk at foofus.net> in 2010 * and placed in the public domain. 
* * Support for freeradius-wep-patch challenge/response format * added by Linus Lüssing in 2012 and is licensed under CC0/PD terms: * To the extent possible under law, Linus Lüssing has waived all copyright * and related or neighboring rights to this work. This work is published from: * Germany. * * * This algorithm is designed for performing brute-force cracking of the * MSCHAPv2 challenge/response sets exchanged during network-based * authentication attempts. The captured challenge/response set from these * attempts should be stored using the following format: * * USERNAME:::AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE * USERNAME::DOMAIN:AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE * DOMAIN\USERNAME:::AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE * :::MSCHAPv2 CHALLENGE:MSCHAPv2 RESPONSE: * * For example: * User:::5B5D7C7D7B3F2F3E3C2C602132262628:82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF:21402324255E262A28295F2B3A337C7E * domain\fred:::56d64cbe7bad61349a0b752335100eaf:d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b:7f8a466cff2a6bf0c80218bbf56d76bc * * http://freeradius.org/rfc/rfc2759.txt * * Modified for performance and support for SSE2, NTLMv1 ESS, OMP and UTF-8, by * magnum 2010-2011 and 2013. 
* */ #if FMT_EXTERNS_H extern struct fmt_main fmt_MSCHAPv2_new; extern struct fmt_main fmt_NETNTLM_new; #elif FMT_REGISTERS_H john_register_one(&fmt_MSCHAPv2_new); john_register_one(&fmt_NETNTLM_new); #else #include <string.h> #include <openssl/des.h> #include "arch.h" #include "simd-intrinsics.h" #ifdef SIMD_COEF_32 #define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD4) #else #ifdef _OPENMP #ifndef OMP_SCALE #define OMP_SCALE 4 #endif #include <omp.h> #endif #endif #include "misc.h" #include "common.h" #include "formats.h" #include "options.h" #include "memory.h" #include "sha.h" #include "md4.h" #include "md5.h" #include "unicode.h" #include "john.h" #include "memdbg.h" extern volatile int bench_running; #ifndef uchar #define uchar unsigned char #endif #define CHAP_FORMAT_LABEL "MSCHAPv2" #define CHAP_FORMAT_NAME "C/R" #define FORMAT_TAG "$MSCHAPv2$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define FORMAT_TAGN "$NETNTLM$" #define FORMAT_TAGN_LEN (sizeof(FORMAT_TAGN)-1) #define CHAP_USERNAME_LENGTH 256 #define CHAP_CHALLENGE_LENGTH 64 #define CHAP_TOTAL_LENGTH 13 + CHAP_USERNAME_LENGTH + CHAP_CHALLENGE_LENGTH + CIPHERTEXT_LENGTH #define NTLM_FORMAT_LABEL "netntlm" #define NTLM_FORMAT_NAME "NTLMv1 C/R" #define NTLM_TOTAL_LENGTH (10 + 2 * 2 * SALT_SIZE + CIPHERTEXT_LENGTH) #define ALGORITHM_NAME "MD4 DES (ESS MD5) " MD4_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1000 #define FULL_BINARY_SIZE (2 + 8 * 3) #define BINARY_SIZE (2 + 8) #define BINARY_ALIGN 2 #define SALT_SIZE 8 #define SALT_ALIGN MEM_ALIGN_WORD #define CIPHERTEXT_LENGTH 48 #ifdef SIMD_COEF_32 #define PLAINTEXT_LENGTH 27 //#define SSE_OMP #if defined (_OPENMP) && defined(SSE_OMP) #define BLOCK_LOOPS (2048 / NBKEYS) #else #define BLOCK_LOOPS (1024 / NBKEYS) #endif #define MIN_KEYS_PER_CRYPT (NBKEYS * BLOCK_LOOPS) #define MAX_KEYS_PER_CRYPT (NBKEYS * BLOCK_LOOPS) #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned 
int)index/SIMD_COEF_32*16*SIMD_COEF_32*4 ) #define GETOUTPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*4*SIMD_COEF_32*4 ) #else #define PLAINTEXT_LENGTH 64 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 2048 #endif #ifdef SIMD_COEF_32 static unsigned char *saved_key; #else static UTF16 (*saved_key)[PLAINTEXT_LENGTH + 1]; static int (*saved_len); #endif static unsigned short (*crypt_key); static unsigned char *nthash; static ARCH_WORD_32 *bitmap; static int cmps_per_crypt, use_bitmap; static int valid_i, valid_j; static uchar *challenge; static int keys_prepared; static struct fmt_main *my; static char *chap_long_to_short(char *orig); /* used to cannonicalize the MSCHAPv2 format */ static struct fmt_tests chap_tests[] = { {"$MSCHAPv2$4c092fd3fd98236502e8591100046326$b912ce522524d33123a982cf330a57f8e953fa7974042b5d$6a4915d0ce61d42be533640a75391925$1111", "2222"}, {"$MSCHAPv2$5B5D7C7D7B3F2F3E3C2C602132262628$82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF$21402324255E262A28295F2B3A337C7E$User", "clientPass"}, {"$MSCHAPv2$d07054459a1fdbc266a006f0220e6fac$33c8331a9b03b7e003f09dd253d740a2bead544143cc8bde$3545cb1d89b507a5de104435e81b14a4$testuser1", "Cricket8"}, {"$MSCHAPv2$56d64cbe7bad61349a0b752335100eaf$d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b$7f8a466cff2a6bf0c80218bbf56d76bc$fred", "OMG!BBQ!11!one"}, /* domain\fred */ #if PLAINTEXT_LENGTH >= 35 {"$MSCHAPv2$b3c42db475b881d3c52ff3923d7b3bf8$f07c7a4eb391f5debe32d814679a5a69661b86b33227c4f8$6321f8649b971bd11ce8d5cb22a4a738$bOb", "asdblahblahblahblahblahblahblahblah"}, /* WorkGroup\bOb */ #endif {"$MSCHAPv2$d94e7c7972b2376b28c268583e162de7$eba25a3b04d2c7085d01f842e2befc91745c40db0f792356$0677ca7318fd7f65ae1b4f58c9f4f400$lameuser", ""}, /* no password */ {"$MSCHAPv2$8710da60ebfc4cab$c4e3bb55904c966927ee68e5f1472e1f5d8ec165713b5360$$foo4", "bar4" }, {"$MSCHAPv2$8710da60ebfc4cab$c4e3bb55904c966927ee68e5f1472e1f5d8ec165713b5360$$", 
"bar4" }, /* Ettercap generated three test vectors */ {"$MSCHAPv2$3D79CC8CDC0261D4$B700770725F87739ADB110B310D9A289CDBB550ADCA6CB86$solar", "solarisalwaysbusy"}, {"$MSCHAPv2$BA75EB14EFBFBF25$ED8CC90FD40FAA2D6BCD0ABD0B1F562FD777DF6C5609C98B$lulu", "password"}, {"$MSCHAPv2$95A87FA62EBCD2E3C8B09E1B448A6C72$ED8CC90FD40FAA2D6BCD0ABD0B1F562FD777DF6C5609C98B$E2AE0995EAAC6CEFF0D9757428B51509$lulu", "password"}, /* Single test vector from chapcrack's sample pcap file */ {"$MSCHAPv2$6D0E1C056CD94D5F$1C93ABCE815400686BAECA315F348469256420598A73AD49$moxie", "bPCFyF2uL1p5Lg5yrKmqmY"}, {"", "clientPass", {"User", "", "", "5B5D7C7D7B3F2F3E3C2C602132262628", "82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF", "21402324255E262A28295F2B3A337C7E"} }, {"", "Cricket8", {"testuser1", "", "", "d07054459a1fdbc266a006f0220e6fac", "33c8331a9b03b7e003f09dd253d740a2bead544143cc8bde", "3545cb1d89b507a5de104435e81b14a4"} }, {"", "OMG!BBQ!11!one", {"domain\\fred", "", "", "56d64cbe7bad61349a0b752335100eaf", "d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b", "7f8a466cff2a6bf0c80218bbf56d76bc"} }, /* domain\fred */ {"", "", {"lameuser", "", "domain", "d94e7c7972b2376b28c268583e162de7", "eba25a3b04d2c7085d01f842e2befc91745c40db0f792356", "0677ca7318fd7f65ae1b4f58c9f4f400"} }, /* no password */ {NULL} }; static struct fmt_tests ntlm_tests[] = { {"$NETNTLM$1122334455667788$BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "g3rg3g3rg3g3rg3"}, #ifndef SIMD_COEF_32 /* exceeds max length for SSE */ {"$NETNTLM$1122334455667788$E463FAA5D868ECE20CAE622474A2F440A652D642156AF863", "M1xedC4se%^&*@)##(blahblah!@#"}, #endif {"$NETNTLM$c75c20bff9baa71f4765f360625700b0$81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "password"}, {"$NETNTLM$1122334455667788$35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "FooBarGerg"}, {"$NETNTLM$1122334455667788$A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "visit www.foofus.net"}, 
{"$NETNTLM$24ca92fdab441aa4c70e4fb229437ef3$abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9", "longpassword"}, {"$NETNTLM$1122334455667788$B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "cory21"}, {"", "g3rg3g3rg3g3rg3", {"User", "", "", "lm-hash", "BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "1122334455667788"} }, {"", "FooBarGerg", {"User", "", "", "lm-hash", "35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "1122334455667788"} }, {"", "visit www.foofus.net", {"User", "", "", "lm-hash", "A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "1122334455667788"} }, {"", "password", {"ESS", "", "", "4765f360625700b000000000000000000000000000000000", "81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "c75c20bff9baa71f"} }, {"", "cory21", {"User", "", "", "lm-hash", "B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "1122334455667788"} }, {NULL} }; static inline void setup_des_key(uchar key_56[], DES_key_schedule *ks) { DES_cblock key; key[0] = key_56[0]; key[1] = (key_56[0] << 7) | (key_56[1] >> 1); key[2] = (key_56[1] << 6) | (key_56[2] >> 2); key[3] = (key_56[2] << 5) | (key_56[3] >> 3); key[4] = (key_56[3] << 4) | (key_56[4] >> 4); key[5] = (key_56[4] << 3) | (key_56[5] >> 5); key[6] = (key_56[5] << 2) | (key_56[6] >> 6); key[7] = (key_56[6] << 1); DES_set_key(&key, ks); } static int chap_valid_long(char *ciphertext) { char *pos, *pos2; if (ciphertext == NULL) return 0; else if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)!=0) return 0; if (strlen(ciphertext) > CHAP_TOTAL_LENGTH) return 0; /* Validate Authenticator/Server Challenge Length */ pos = &ciphertext[FORMAT_TAG_LEN]; for (pos2 = pos; *pos2 != '$'; pos2++) if (atoi16[ARCH_INDEX(*pos2)] == 0x7F) return 0; if ( !(*pos2 && (pos2 - pos == CHAP_CHALLENGE_LENGTH / 2)) ) return 0; /* Validate MSCHAPv2 Response Length */ pos2++; pos = pos2; for (; *pos2 != '$'; pos2++) if (atoi16[ARCH_INDEX(*pos2)] == 0x7F) return 0; if ( !(*pos2 && (pos2 - pos == CIPHERTEXT_LENGTH)) ) return 0; /* Validate 
Peer/Client Challenge Length */ pos2++; pos = pos2; for (; *pos2 != '$'; pos2++) if (atoi16[ARCH_INDEX(*pos2)] == 0x7F) return 0; if ( !(*pos2 && (pos2 - pos == CHAP_CHALLENGE_LENGTH / 2)) ) return 0; /* Validate Username Length */ if (strlen(++pos2) > CHAP_USERNAME_LENGTH) return 0; return 1; } static int chap_valid_short(char *ciphertext) { char *pos, *pos2; if (ciphertext == NULL) return 0; else if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)!=0) return 0; if (strlen(ciphertext) > CHAP_TOTAL_LENGTH) return 0; /* Validate MSCHAPv2 Challenge Length */ pos = &ciphertext[FORMAT_TAG_LEN]; for (pos2 = pos; *pos2 != '$'; pos2++) if (atoi16[ARCH_INDEX(*pos2)] == 0x7F) return 0; if ( !(*pos2 && (pos2 - pos == CHAP_CHALLENGE_LENGTH / 4)) ) return 0; /* Validate MSCHAPv2 Response Length */ pos2++; pos = pos2; for (; *pos2 != '$'; pos2++) if (atoi16[ARCH_INDEX(*pos2)] == 0x7F) return 0; if ( !(*pos2 && (pos2 - pos == CIPHERTEXT_LENGTH)) ) return 0; return 1; } static void chap_get_challenge(const char *ciphertext, unsigned char *binary_salt) { int i; const char *pos = ciphertext + FORMAT_TAG_LEN; for (i = 0; i < SALT_SIZE; i++) binary_salt[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])]; } /* Either the cipherext already contains the MSCHAPv2 Challenge (4 Bytes) or we are going to calculate it via: sha1(|Peer/Client Challenge (8 Bytes)|Authenticator/Server Challenge (8 Bytes)|Username (<=256)|) NOTE, we now ONLY call this function the the short form. The long form gets converted into the short form in either prepare or split function. The short form is cannonical form (Change made July, 2014, JimF) */ static void *chap_get_salt(char *ciphertext) { static unsigned char *binary_salt; unsigned char digest[20]; if (!binary_salt) binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD); /* This is just to silence scan-build. It will never happen. It is unclear why only this format gave warnings, many others do similar things. 
*/ if (!ciphertext) return ciphertext; memset(binary_salt, 0, SALT_SIZE); memset(digest, 0, 20); chap_get_challenge(ciphertext, binary_salt); return (void*)binary_salt; } /* * This function will convert long hashes, into short ones (the short is now cannonical format) * converts * $MSCHAPv2$95a87fa62ebcd2e3c8b09e1b448a6c72$ed8cc90fd40faa2d6bcd0abd0b1f562fd777df6c5609c98b$e2ae0995eaac6ceff0d9757428b51509$lulu * into * $MSCHAPv2$ba75eb14efbfbf25$ed8cc90fd40faa2d6bcd0abd0b1f562fd777df6c5609c98b$$ * * This code was moved from get_salt(). */ static char *chap_long_to_short(char *ciphertext) { static char Buf[CHAP_TOTAL_LENGTH+1]; // larger than we need, but not a big deal static SHA_CTX ctx; unsigned char tmp[16]; unsigned char digest[20]; char *pos = NULL; int i; SHA1_Init(&ctx); /* Peer Challenge */ pos = ciphertext + FORMAT_TAG_LEN + 16*2 + 1 + 24*2 + 1; /* Skip $MSCHAPv2$, Authenticator Challenge and Response Hash */ memset(tmp, 0, 16); for (i = 0; i < 16; i++) tmp[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])]; SHA1_Update(&ctx, tmp, 16); /* Authenticator Challenge */ pos = ciphertext + FORMAT_TAG_LEN; /* Skip $MSCHAPv2$ */ memset(tmp, 0, 16); for (i = 0; i < 16; i++) tmp[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])]; SHA1_Update(&ctx, tmp, 16); /* Username - Only the user name (as presented by the peer and excluding any prepended domain name) is used as input to SHAUpdate() */ pos = ciphertext + FORMAT_TAG_LEN + 16*2 + 1 + 24*2 + 1 + 16*2 + 1; /* Skip $MSCHAPv2$, Authenticator, Response and Peer */ SHA1_Update(&ctx, pos, strlen(pos)); SHA1_Final(digest, &ctx); // Ok, now we re-make our ciphertext buffer, into the short cannonical form. 
strcpy(Buf, FORMAT_TAG); pos = Buf + FORMAT_TAG_LEN; for (i = 0; i < SALT_SIZE; i++) { //binary_salt.u8[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])]; pos[(i<<1)] = itoa16[digest[i]>>4]; pos[(i<<1)+1] = itoa16[digest[i]&0xF]; } memcpy(&pos[16], &ciphertext[42], CIPHERTEXT_LENGTH+2); pos[16+CIPHERTEXT_LENGTH+2] = '$'; pos[16+CIPHERTEXT_LENGTH+3] = 0; //printf ("short=%s original=%s\n", Buf, ciphertext); return Buf; } static int chap_valid(char *ciphertext, struct fmt_main *pFmt) { char *cp = NULL; if (chap_valid_short(ciphertext)) cp = ciphertext + FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 4 + 1; else if (chap_valid_long(ciphertext)) cp = ciphertext + FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 2 + 1; if (cp) { uchar key[7] = {0, 0, 0, 0, 0, 0, 0}; DES_key_schedule ks; DES_cblock b3cmp; uchar binary[8]; DES_cblock *challenge = chap_get_salt(ciphertext); int i, j; cp += 2 * 8 * 2; for (i = 0; i < 8; i++) { binary[i] = atoi16[ARCH_INDEX(cp[i * 2])] << 4; binary[i] |= atoi16[ARCH_INDEX(cp[i * 2 + 1])]; } key[0] = valid_i; key[1] = valid_j; setup_des_key(key, &ks); DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT); if (!memcmp(binary, &b3cmp, 8)) return 1; for (i = 0; i < 0x100; i++) for (j = 0; j < 0x100; j++) { key[0] = i; key[1] = j; setup_des_key(key, &ks); DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT); if (!memcmp(binary, &b3cmp, 8)) { valid_i = i; valid_j = j; return 1; } } #ifdef DEBUG if (!bench_running) fprintf(stderr, "Rejected MSCHAPv2 hash with " "invalid 3rd block\n"); #endif } return 0; } static char *chap_prepare_long(char *split_fields[10]) { char *username, *cp; /* DOMAIN\USERNAME -or - USERNAME -- ignore DOMAIN */ if ((username = strstr(split_fields[0], "\\")) == NULL) username = split_fields[0]; else username++; cp = mem_alloc(FORMAT_TAG_LEN+strlen(split_fields[3])+1+strlen(split_fields[4])+ 1+strlen(split_fields[5])+1+strlen(username)+1); sprintf(cp, "%s%s$%s$%s$%s", FORMAT_TAG, split_fields[3], split_fields[4], 
split_fields[5], username); if (chap_valid_long(cp)) { char *cp2 = str_alloc_copy(cp); MEM_FREE(cp); return cp2; } MEM_FREE(cp); return split_fields[1]; } static char *chap_prepare_short(char *split_fields[10]) { char *cp; cp = mem_alloc(FORMAT_TAG_LEN+strlen(split_fields[3])+1+strlen(split_fields[4])+ 1+1+1); sprintf(cp, "%s%s$%s$$", FORMAT_TAG, split_fields[3], split_fields[4]); if (chap_valid_short(cp)) { char *cp2 = str_alloc_copy(cp); MEM_FREE(cp); return cp2; } MEM_FREE(cp); return split_fields[1]; } static char *chap_prepare(char *split_fields[10], struct fmt_main *pFmt) { char *ret; if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN)) { // check for a short format that has any extra trash fields, and if so remove them. char *cp1, *cp2, *cp3; cp1 = split_fields[1]; cp1 += FORMAT_TAG_LEN; cp2 = strchr(cp1, '$'); ret = NULL; if (cp2 && cp2-cp1 == CHAP_CHALLENGE_LENGTH/4) { ++cp2; cp3 = strchr(cp2, '$'); if (cp3 && cp3-cp2 == CIPHERTEXT_LENGTH && (strlen(cp3) > 2 || cp3[2] != '$')) { ret = str_alloc_copy(split_fields[1]); ret[(cp3-split_fields[1]) + 1] = '$'; ret[(cp3-split_fields[1]) + 2] = 0; //printf ("Here is the cut item: %s\n", ret); } } } else if (split_fields[0] && split_fields[3] && split_fields[4] && split_fields[5] && strlen(split_fields[3]) == CHAP_CHALLENGE_LENGTH/2 && strlen(split_fields[4]) == CIPHERTEXT_LENGTH && strlen(split_fields[5]) == CHAP_CHALLENGE_LENGTH/2) ret = chap_prepare_long(split_fields); else if (split_fields[0] && split_fields[3] && split_fields[4] && strlen(split_fields[3]) == CHAP_CHALLENGE_LENGTH/4 && strlen(split_fields[4]) == CIPHERTEXT_LENGTH) ret = chap_prepare_short(split_fields); else ret = NULL; if (ret && chap_valid_long(ret)) ret = chap_long_to_short(ret); else if (chap_valid_long(split_fields[1])) ret = chap_long_to_short(split_fields[1]); return ret ? 
ret : split_fields[1]; } static char *chap_split(char *ciphertext, int index, struct fmt_main *self) { static char out[CHAP_TOTAL_LENGTH + 1]; int i, j = 0; memset(out, 0, CHAP_TOTAL_LENGTH + 1); memcpy(out, ciphertext, strlen(ciphertext)); /* convert hashes to lower-case - exclude $MSCHAPv2 and USERNAME */ for (i = FORMAT_TAG_LEN; i < CHAP_TOTAL_LENGTH + 1 && j < 3; i++) { if (out[i] >= 'A' && out[i] <= 'Z') out[i] |= 0x20; else if (out[i] == '$') j++; } if (chap_valid_long(out)) return chap_long_to_short(out); return out; } static void *ntlm_get_salt(char *ciphertext) { static uchar *binary_salt; int i; if (!binary_salt) binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD); if (ciphertext[25] == '$') { // Server challenge ciphertext += FORMAT_TAGN_LEN; for (i = 0; i < SALT_SIZE; ++i) binary_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])]; } else { uchar es_salt[2*SALT_SIZE], k1[2*SALT_SIZE]; MD5_CTX ctx; ciphertext += FORMAT_TAGN_LEN; // Extended Session Security, // Concatenate Server & Client challenges for (i = 0;i < 2 * SALT_SIZE; ++i) es_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])]; // MD5 the concatenated challenges, result is our key MD5_Init(&ctx); MD5_Update(&ctx, es_salt, 16); MD5_Final((void*)k1, &ctx); memcpy(binary_salt, k1, SALT_SIZE); // but only 8 bytes of it } return (void*)binary_salt; } static int ntlm_valid(char *ciphertext, struct fmt_main *self) { char *pos; if (strncmp(ciphertext, FORMAT_TAGN, FORMAT_TAGN_LEN)!=0) return 0; if ((strlen(ciphertext) != 74) && (strlen(ciphertext) != 90)) return 0; if ((ciphertext[25] != '$') && (ciphertext[41] != '$')) return 0; for (pos = &ciphertext[FORMAT_TAGN_LEN]; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++); if (*pos != '$') return 0; for (pos++; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++); if (!*pos && ((pos - ciphertext - 26 == CIPHERTEXT_LENGTH) || (pos - ciphertext - 42 == CIPHERTEXT_LENGTH))) { uchar key[7] = 
{0, 0, 0, 0, 0, 0, 0}; DES_key_schedule ks; DES_cblock b3cmp; uchar binary[8]; DES_cblock *challenge = ntlm_get_salt(ciphertext); int i, j; ciphertext = strrchr(ciphertext, '$') + 1 + 2 * 8 * 2; for (i = 0; i < 8; i++) { binary[i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] << 4; binary[i] |= atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])]; } key[0] = valid_i; key[1] = valid_j; setup_des_key(key, &ks); DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT); if (!memcmp(binary, &b3cmp, 8)) return 1; for (i = 0; i < 0x100; i++) for (j = 0; j < 0x100; j++) { key[0] = i; key[1] = j; setup_des_key(key, &ks); DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT); if (!memcmp(binary, &b3cmp, 8)) { valid_i = i; valid_j = j; return 1; } } #ifdef DEBUG if (!bench_running) fprintf(stderr, "Rejected NetNTLM hash with invalid " "3rd block\n"); #endif } return 0; } static char *ntlm_prepare(char *split_fields[10], struct fmt_main *self) { char *cp; char clientChal[17]; if (!strncmp(split_fields[1], FORMAT_TAGN, FORMAT_TAGN_LEN)) return split_fields[1]; if (!split_fields[3]||!split_fields[4]||!split_fields[5]) return split_fields[1]; if (strlen(split_fields[4]) != CIPHERTEXT_LENGTH) return split_fields[1]; // this string suggests we have an improperly formatted NTLMv2 if (!strncmp(&split_fields[4][32], "0101000000000000", 16)) return split_fields[1]; // Ignore anonymous login (Username "", Password "") if (split_fields[0] && strlen(split_fields[0]) == 0 && !strncasecmp(split_fields[3], "edb7398877d716be", 16) && !strncasecmp(split_fields[4], "42aeb71fbb6dc18499016b08" "b178ba65430ad39ae2498629", 48)) return split_fields[1]; // Handle ESS (8 byte client challenge in "LM" field padded with zeros) if (strlen(split_fields[3]) == 48 && !strncmp(&split_fields[3][16], "00000000000000000000000000000000", 32)) { memcpy(clientChal, split_fields[3],16); clientChal[16] = 0; } else clientChal[0] = 0; cp = mem_alloc(FORMAT_TAGN_LEN+strlen(split_fields[5])+strlen(clientChal)+1+ strlen(split_fields[4])+1); 
sprintf(cp, "%s%s%s$%s", FORMAT_TAGN, split_fields[5], clientChal, split_fields[4]); if (ntlm_valid(cp,self)) { char *cp2 = str_alloc_copy(cp); MEM_FREE(cp); return cp2; } MEM_FREE(cp); return split_fields[1]; } static char *ntlm_split(char *ciphertext, int index, struct fmt_main *self) { static char out[NTLM_TOTAL_LENGTH + 1]; memset(out, 0, NTLM_TOTAL_LENGTH + 1); strcpy(out, ciphertext); strlwr(&out[FORMAT_TAGN_LEN]); /* Exclude: $NETNTLM$ */ return out; } static void set_salt(void *salt) { challenge = salt; } // ISO-8859-1 to UCS-2, directly into vector key buffer static void set_key_ansi(char *_key, int index) { #ifdef SIMD_COEF_32 const uchar *key = (uchar*)_key; unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS(0, index)]; unsigned int len, temp2; len = 0; while((temp2 = *key++)) { unsigned int temp; if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1) { temp2 |= (temp << 16); *keybuf_word = temp2; } else { temp2 |= (0x80 << 16); *keybuf_word = temp2; len++; goto key_cleaning; } len += 2; keybuf_word += SIMD_COEF_32; } *keybuf_word = 0x80; key_cleaning: keybuf_word += SIMD_COEF_32; while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } ((unsigned int*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4; #else #if ARCH_LITTLE_ENDIAN UTF8 *s = (UTF8*)_key; UTF16 *d = saved_key[index]; while (*s) *d++ = *s++; *d = 0; saved_len[index] = (int)((char*)d - (char*)saved_key[index]); #else UTF8 *s = (UTF8*)_key; UTF8 *d = (UTF8*)saved_key[index]; while (*s) { *d++ = *s++; ++d; } *d = 0; saved_len[index] = (int)((char*)d - (char*)saved_key[index]); #endif #endif keys_prepared = 0; } // Legacy codepage to UCS-2, directly into vector key buffer static void set_key_CP(char *_key, int index) { #ifdef SIMD_COEF_32 const uchar *key = (uchar*)_key; unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS(0, index)]; unsigned int len, temp2; len = 0; while((temp2 = *key++)) { 
unsigned int temp; temp2 = CP_to_Unicode[temp2]; if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1) { temp = CP_to_Unicode[temp]; temp2 |= (temp << 16); *keybuf_word = temp2; } else { temp2 |= (0x80 << 16); *keybuf_word = temp2; len++; goto key_cleaning_enc; } len += 2; keybuf_word += SIMD_COEF_32; } *keybuf_word = 0x80; key_cleaning_enc: keybuf_word += SIMD_COEF_32; while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } ((unsigned int*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4; #else saved_len[index] = enc_to_utf16(saved_key[index], PLAINTEXT_LENGTH + 1, (uchar*)_key, strlen(_key)) << 1; if (saved_len[index] < 0) saved_len[index] = strlen16(saved_key[index]); #endif keys_prepared = 0; } // UTF-8 to UCS-2, directly into vector key buffer static void set_key_utf8(char *_key, int index) { #ifdef SIMD_COEF_32 const UTF8 *source = (UTF8*)_key; unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS(0, index)]; UTF32 chl, chh = 0x80; unsigned int len = 0; while (*source) { chl = *source; if (chl >= 0xC0) { unsigned int extraBytesToRead; extraBytesToRead = opt_trailingBytesUTF8[chl & 0x3f]; switch (extraBytesToRead) { #if NT_FULL_UNICODE case 3: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; #endif case 2: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; case 1: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; case 0: break; default: goto bailout; } chl -= offsetsFromUTF8[extraBytesToRead]; } source++; len++; #if NT_FULL_UNICODE if (chl > UNI_MAX_BMP) { if (len == PLAINTEXT_LENGTH) { chh = 0x80; *keybuf_word = (chh << 16) | chl; keybuf_word += SIMD_COEF_32; break; } #define halfBase 0x0010000UL #define halfShift 10 #define halfMask 0x3FFUL #define UNI_SUR_HIGH_START (UTF32)0xD800 #define UNI_SUR_LOW_START (UTF32)0xDC00 chl -= halfBase; chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);; chl = 
(UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START); len++; } else #endif if (*source && len < PLAINTEXT_LENGTH) { chh = *source; if (chh >= 0xC0) { unsigned int extraBytesToRead = opt_trailingBytesUTF8[chh & 0x3f]; switch (extraBytesToRead) { #if NT_FULL_UNICODE case 3: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; #endif case 2: ++source; if (*source) { chh <<= 6; chh += *source; } else goto bailout; case 1: ++source; if (*source) { chh <<= 6; chh += *source; } else goto bailout; case 0: break; default: goto bailout; } chh -= offsetsFromUTF8[extraBytesToRead]; } source++; len++; } else { chh = 0x80; *keybuf_word = (chh << 16) | chl; keybuf_word += SIMD_COEF_32; break; } *keybuf_word = (chh << 16) | chl; keybuf_word += SIMD_COEF_32; } if (chh != 0x80 || len == 0) { *keybuf_word = 0x80; keybuf_word += SIMD_COEF_32; } bailout: while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } ((unsigned int*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4; #else saved_len[index] = utf8_to_utf16(saved_key[index], PLAINTEXT_LENGTH + 1, (uchar*)_key, strlen(_key)) << 1; if (saved_len[index] < 0) saved_len[index] = strlen16(saved_key[index]); #endif keys_prepared = 0; } static void init(struct fmt_main *self) { #if defined (_OPENMP) && !defined(SIMD_COEF_32) int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif my = self; if (options.target_enc == UTF_8) { self->methods.set_key = set_key_utf8; self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH); } else { if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) self->methods.set_key = set_key_CP; } if (!saved_key) { #if SIMD_COEF_32 saved_key = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*saved_key) * 64, MEM_ALIGN_SIMD); nthash = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*nthash) * 
16, MEM_ALIGN_SIMD); #else saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); nthash = mem_calloc(self->params.max_keys_per_crypt, sizeof(*nthash) * 16); saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); #endif crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(unsigned short)); } if (bitmap == NULL) bitmap = mem_calloc_align(1, 0x10000 / 8, MEM_ALIGN_CACHE); else memset(bitmap, 0, 0x10000 / 8); use_bitmap = 0; /* we did not use bitmap yet */ cmps_per_crypt = 2; /* try bitmap */ } static void done(void) { MEM_FREE(bitmap); MEM_FREE(crypt_key); MEM_FREE(nthash); #ifndef SIMD_COEF_32 MEM_FREE(saved_len); #endif MEM_FREE(saved_key); } // Get the key back from the key buffer, from UCS-2 static char *get_key(int index) { #ifdef SIMD_COEF_32 unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS(0, index)]; static UTF16 key[PLAINTEXT_LENGTH + 1]; unsigned int md4_size=0; unsigned int i=0; for(; md4_size < PLAINTEXT_LENGTH; i += SIMD_COEF_32, md4_size++) { key[md4_size] = keybuf_word[i]; key[md4_size+1] = keybuf_word[i] >> 16; if (key[md4_size] == 0x80 && key[md4_size+1] == 0) { key[md4_size] = 0; break; } ++md4_size; if (key[md4_size] == 0x80 && ((keybuf_word[i+SIMD_COEF_32]&0xFFFF) == 0 || md4_size == PLAINTEXT_LENGTH)) { key[md4_size] = 0; break; } } return (char*)utf16_to_enc(key); #else return (char*)utf16_to_enc(saved_key[index]); #endif } static void *get_binary(char *ciphertext) { static uchar *binary; static int warned = 0, loaded = 0; DES_cblock *challenge = my->methods.salt(ciphertext); int i, j; if (!binary) binary = mem_alloc_tiny(FULL_BINARY_SIZE, BINARY_ALIGN); if (john_main_process) if (!warned && !ldr_in_pot && !bench_running && ++loaded > 100) { warned = 1; fprintf(stderr, "%s: Note: slow loading. For short runs, try " "--format=%s-naive\ninstead. 
That version loads " "faster but runs slower.\n", my->params.label, my->params.label); } if (chap_valid_short(ciphertext)) ciphertext += FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 4 + 1; else if (chap_valid_long(ciphertext)) ciphertext += FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 2 + 1; else /* ntlmv1 */ ciphertext = strrchr(ciphertext, '$') + 1; for (i = 0; i < FULL_BINARY_SIZE - 2; i++) { binary[2 + i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] << 4; binary[2 + i] |= atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])]; } { uchar key[7] = {0, 0, 0, 0, 0, 0, 0}; DES_key_schedule ks; DES_cblock b3cmp; key[0] = valid_i; key[1] = valid_j; setup_des_key(key, &ks); DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT); if (!memcmp(&binary[2 + 8 * 2], &b3cmp, 8)) { binary[0] = valid_i; binary[1] = valid_j; goto out; } for (i = 0; i < 0x100; i++) for (j = 0; j < 0x100; j++) { key[0] = i; key[1] = j; setup_des_key(key, &ks); DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT); if (!memcmp(&binary[2 + 8 * 2], &b3cmp, 8)) { binary[0] = i; binary[1] = j; goto out; } } fprintf(stderr, "Bug: %s hash with invalid 3rd block, should " "have been rejected in valid()\n", my->params.label); binary[0] = binary[1] = 0x55; } out: return binary; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; if (!keys_prepared) { int i = 0; if (use_bitmap) { #if MAX_KEYS_PER_CRYPT >= 200 //#warning Notice: Using memset memset(bitmap, 0, 0x10000 / 8); #else //#warning Notice: Not using memset #ifdef SIMD_COEF_32 for (i = 0; i < NBKEYS * BLOCK_LOOPS; i++) #else for (i = 0; i < count; i++) #endif { unsigned int value = crypt_key[i]; bitmap[value >> 5] = 0; } #endif } use_bitmap = cmps_per_crypt >= 2; cmps_per_crypt = 0; #ifdef SIMD_COEF_32 #if (BLOCK_LOOPS > 1) #if defined(_OPENMP) && defined(SSE_OMP) #pragma omp parallel for #endif for (i = 0; i < BLOCK_LOOPS; i++) SIMDmd4body(&saved_key[i * NBKEYS * 64], (unsigned int*)&nthash[i * NBKEYS * 16], NULL, SSEi_MIXED_IN); #else 
SIMDmd4body(saved_key, (unsigned int*)nthash, NULL, SSEi_MIXED_IN); #endif if (use_bitmap) for (i = 0; i < NBKEYS * BLOCK_LOOPS; i++) { unsigned int value; value = *(ARCH_WORD_32*) &nthash[GETOUTPOS(12, i)] >> 16; crypt_key[i] = value; bitmap[value >> 5] |= 1U << (value & 0x1f); } else for (i = 0; i < NBKEYS * BLOCK_LOOPS; i++) { crypt_key[i] = *(ARCH_WORD_32*) &nthash[GETOUTPOS(12, i)] >> 16; } #else #if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1) #ifdef _OPENMP #pragma omp parallel for #endif for (i = 0; i < count; i++) #endif { MD4_CTX ctx; MD4_Init( &ctx ); MD4_Update(&ctx, saved_key[i], saved_len[i]); MD4_Final((uchar*)&nthash[i * 16], &ctx); crypt_key[i] = ((unsigned short*)&nthash[i * 16])[7]; if (use_bitmap) { unsigned int value = crypt_key[i]; bitmap[value >> 5] |= 1U << (value & 0x1f); } } #endif keys_prepared = 1; } return count; } static int cmp_one(void *binary, int index) { if (crypt_key[index] == *(unsigned short*)binary) { DES_key_schedule ks; DES_cblock computed_binary; unsigned int key[2]; #ifdef SIMD_COEF_32 int i; for (i = 0; i < 2; i++) key[i] = *(ARCH_WORD_32*) &nthash[GETOUTPOS(4 * i, index)]; #else memcpy(key, &nthash[index * 16], 8); #endif setup_des_key((unsigned char*)key, &ks); DES_ecb_encrypt((DES_cblock*)challenge, &computed_binary, &ks, DES_ENCRYPT); return !memcmp(((char*)binary) + 2, computed_binary, 8); } return 0; } static int cmp_all(void *binary, int count) { unsigned int value = *(unsigned short*)binary; int index; cmps_per_crypt++; if (use_bitmap && !(bitmap[value >> 5] & (1U << (value & 0x1f)))) goto out; #ifdef SIMD_COEF_32 /* Let's give the optimizer a hint! 
*/ for (index = 0; index < NBKEYS * BLOCK_LOOPS; index += 2) #else for (index = 0; index < count; index += 2) #endif { unsigned int a = crypt_key[index]; unsigned int b = crypt_key[index + 1]; #if 0 if (((a | b) & value) != value) continue; #endif if (a == value || b == value) goto thorough; } goto out; thorough: #ifdef SIMD_COEF_32 for (index = 0; index < NBKEYS * BLOCK_LOOPS; index++) #else for (; index < count; index++) #endif { if (crypt_key[index] == value && cmp_one(binary, index)) return 1; } out: return 0; } static int cmp_exact(char *source, int index) { DES_key_schedule ks; uchar binary[24]; unsigned char key[21]; char *cp; int i; #ifdef SIMD_COEF_32 for (i = 0; i < 4; i++) ((ARCH_WORD_32*)key)[i] = *(ARCH_WORD_32*) &nthash[GETOUTPOS(4 * i, index)]; #else memcpy(key, &nthash[index * 16], 16); #endif /* Hash is NULL padded to 21-bytes */ memset(&key[16], 0, 5); /* Split into three 7-byte segments for use as DES keys Use each key to DES encrypt challenge Concatenate output to for 24-byte NTLM response */ setup_des_key(key, &ks); DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)binary, &ks, DES_ENCRYPT); setup_des_key(&key[7], &ks); DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)&binary[8], &ks, DES_ENCRYPT); setup_des_key(&key[14], &ks); DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)&binary[16], &ks, DES_ENCRYPT); // With the normalized source we simply need to skip the // $MSCHAPv2$hhhhhhhhhhhhhhhh$ string to get 'real' binary data. 
// $NETNTLM$c75c20bff9baa71f4765f360625700b0$ cp = &source[11]; cp = strchr(cp, '$'); ++cp; for (i = 0; i < 24; ++i) { unsigned char c = (atoi16[ARCH_INDEX(*cp)] << 4) + (atoi16[ARCH_INDEX(*(cp+1))] ); if (c != binary[i]) return 0; cp += 2; } return 1; } static int salt_hash(void *salt) { return *(ARCH_WORD_32*)salt & (SALT_HASH_SIZE - 1); } static int binary_hash_0(void *binary) { return *(unsigned short*)binary & PH_MASK_0; } static int binary_hash_1(void *binary) { return *(unsigned short*)binary & PH_MASK_1; } static int binary_hash_2(void *binary) { return *(unsigned short*)binary & PH_MASK_2; } static int binary_hash_3(void *binary) { return *(unsigned short*)binary & PH_MASK_3; } static int get_hash_0(int index) { return crypt_key[index] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_key[index] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_key[index] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_key[index] & PH_MASK_3; } struct fmt_main fmt_MSCHAPv2_new = { { CHAP_FORMAT_LABEL, CHAP_FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #if !defined(SIMD_COEF_32) || (defined(SIMD_COEF_32) && defined(SSE_OMP)) FMT_OMP | #endif FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8, { NULL }, { FORMAT_TAG }, chap_tests }, { init, done, fmt_default_reset, chap_prepare, chap_valid, chap_split, get_binary, chap_get_salt, { NULL }, fmt_default_source, { binary_hash_0, binary_hash_1, binary_hash_2, binary_hash_3, NULL, NULL, NULL }, salt_hash, NULL, set_salt, set_key_ansi, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, NULL, NULL, NULL }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_NETNTLM_new = { { NTLM_FORMAT_LABEL, NTLM_FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, 
BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #if !defined(SIMD_COEF_32) || (defined(SIMD_PARA_MD4) && defined(SSE_OMP)) FMT_OMP | #endif FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8, { NULL }, { FORMAT_TAGN }, ntlm_tests }, { init, done, fmt_default_reset, ntlm_prepare, ntlm_valid, ntlm_split, get_binary, ntlm_get_salt, { NULL }, fmt_default_source, { binary_hash_0, binary_hash_1, binary_hash_2, binary_hash_3, NULL, NULL, NULL }, salt_hash, NULL, set_salt, set_key_ansi, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, NULL, NULL, NULL }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
/* ===================== tune.h ===================== */
// tune.h - PIGEON CHESS ENGINE (c) 2012-2016 Stuart Riffle namespace Pigeon { #ifndef PIGEON_TUNE_H__ #define PIGEON_TUNE_H__ class AutoTuner : public AmoebaOptimizer { struct TestPos { i8 mCoeff[EVAL_TERMS]; float mExpected; float mWeight; }; std::vector< TestPos > mTestPos; Evaluator mEvaluator; public: AutoTuner() { ParameterSet init( EVAL_TERMS ); ParameterSet range( EVAL_TERMS ); EvalWeight weights[EVAL_TERMS]; mEvaluator.GenerateWeights( weights, 1.0f ); for( int i = 0; i < EVAL_TERMS; i++ ) { float weight = weights[i] * 1.0f / WEIGHT_SCALE; init.mElem[i] = weight; range.mElem[i] = (weight < 100)? (weight * 0.5f) : 100.0f; } range.mElem[EVAL_PAWNS] = 0; range.mElem[EVAL_KINGS] = 0; range.mElem[EVAL_ROOKS_CONNECTED] = 0; this->Initialize( init, range ); mTestPos.reserve( 100000 ); } bool LoadGameLine( const char* str ) { Tokenizer tok( str ); float expected = 0; if( tok.Consume( "W" ) ) expected = 1.0f; else if( tok.Consume( "L" ) ) expected = 0.0f; else if( tok.Consume( "D" ) ) expected = 0.5f; else return( false ); Position pos; pos.Reset(); for( const char* movetext = tok.ConsumeNext(); movetext; movetext = tok.ConsumeNext() ) { MoveSpec spec; FEN::StringToMoveSpec( movetext, spec ); if( spec.IsPromotion() ) break; pos.Step( spec ); expected = 1.0f - expected; MoveMap mmap; pos.CalcMoveMap( &mmap ); if( this->IsValidTestPos( pos, mmap ) ) { TestPos testPos; testPos.mExpected = expected; float phase = mEvaluator.CalcGamePhase< 1 >( pos ); testPos.mWeight = 1.0f;//1.0f - abs( 1.0f - phase ); //if( (phase >= 1.0f) && (phase < 1.9f) )//testPos.mWeight > 0.3f ) //if( phase >= 1.0f ) { u64 coeff[EVAL_TERMS]; mEvaluator.CalcEvalTerms< 1, u64 >( pos, mmap, coeff ); for( int i = 0; i < EVAL_TERMS; i++ ) testPos.mCoeff[i] = (int) coeff[i]; mTestPos.push_back( testPos ); } } } return( true ); } void Dump() { const ParameterSet& best = this->GetBest(); printf( "\n" ); printf( "%" PRId64 " iterations, error %.15f\n", this->GetIterCount(), best.mError ); for( size_t i = 
0; i < best.mElem.size(); i++ ) printf( "%-22s %9.2f\n", mEvaluator.GetWeightName( (int) i ), best.mElem[i] ); } protected: bool IsValidTestPos( const Position& pos, const MoveMap& mmap ) { MoveList moves; moves.UnpackMoveMap( pos, mmap ); moves.DiscardMovesBelow( CAPTURE_WINNING ); if( moves.mCount == 0 ) return( true ); return( false ); } virtual void CalcError( ParameterSet& point ) { double accum = 0; double divisor = 0; double factor = -1.0 / 400.0; #pragma omp parallel for reduction(+: accum,divisor) schedule(static) for( int i = 0; i < (int) mTestPos.size(); i++ ) { //const TestPos& testPos = *iter; const TestPos& testPos = mTestPos[i]; double score = 0; for( size_t i = 0; i < EVAL_TERMS; i++ ) score += point.mElem[i] * testPos.mCoeff[i]; double sig = 1.0 / (1.0 + pow( 10.0, score * factor )); double err = (sig - testPos.mExpected); accum = accum + (err * err) * testPos.mWeight; divisor = divisor + testPos.mWeight; } point.mError = (accum / divisor); printf( "." ); } }; #endif // PIGEON_TUNE_H__ };
/* ===================== gimple.h ===================== */
/* Gimple IR definitions. Copyright (C) 2007-2014 Free Software Foundation, Inc. Contributed by Aldy Hernandez <aldyh@redhat.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_GIMPLE_H #define GCC_GIMPLE_H typedef gimple gimple_seq_node; /* For each block, the PHI nodes that need to be rewritten are stored into these vectors. */ typedef vec<gimple> gimple_vec; enum gimple_code { #define DEFGSCODE(SYM, STRING, STRUCT) SYM, #include "gimple.def" #undef DEFGSCODE LAST_AND_UNUSED_GIMPLE_CODE }; extern const char *const gimple_code_name[]; extern const unsigned char gimple_rhs_class_table[]; /* Error out if a gimple tuple is addressed incorrectly. */ #if defined ENABLE_GIMPLE_CHECKING #define gcc_gimple_checking_assert(EXPR) gcc_assert (EXPR) extern void gimple_check_failed (const_gimple, const char *, int, \ const char *, enum gimple_code, \ enum tree_code) ATTRIBUTE_NORETURN; #define GIMPLE_CHECK(GS, CODE) \ do { \ const_gimple __gs = (GS); \ if (gimple_code (__gs) != (CODE)) \ gimple_check_failed (__gs, __FILE__, __LINE__, __FUNCTION__, \ (CODE), ERROR_MARK); \ } while (0) #else /* not ENABLE_GIMPLE_CHECKING */ #define gcc_gimple_checking_assert(EXPR) ((void)(0 && (EXPR))) #define GIMPLE_CHECK(GS, CODE) (void)0 #endif /* Class of GIMPLE expressions suitable for the RHS of assignments. See get_gimple_rhs_class. 
*/ enum gimple_rhs_class { GIMPLE_INVALID_RHS, /* The expression cannot be used on the RHS. */ GIMPLE_TERNARY_RHS, /* The expression is a ternary operation. */ GIMPLE_BINARY_RHS, /* The expression is a binary operation. */ GIMPLE_UNARY_RHS, /* The expression is a unary operation. */ GIMPLE_SINGLE_RHS /* The expression is a single object (an SSA name, a _DECL, a _REF, etc. */ }; /* Specific flags for individual GIMPLE statements. These flags are always stored in gimple_statement_base.subcode and they may only be defined for statement codes that do not use subcodes. Values for the masks can overlap as long as the overlapping values are never used in the same statement class. The maximum mask value that can be defined is 1 << 15 (i.e., each statement code can hold up to 16 bitflags). Keep this list sorted. */ enum gf_mask { GF_ASM_INPUT = 1 << 0, GF_ASM_VOLATILE = 1 << 1, GF_CALL_FROM_THUNK = 1 << 0, GF_CALL_RETURN_SLOT_OPT = 1 << 1, GF_CALL_TAILCALL = 1 << 2, GF_CALL_VA_ARG_PACK = 1 << 3, GF_CALL_NOTHROW = 1 << 4, GF_CALL_ALLOCA_FOR_VAR = 1 << 5, GF_CALL_INTERNAL = 1 << 6, GF_CALL_CTRL_ALTERING = 1 << 7, GF_OMP_PARALLEL_COMBINED = 1 << 0, GF_OMP_FOR_KIND_MASK = 3 << 0, GF_OMP_FOR_KIND_FOR = 0 << 0, GF_OMP_FOR_KIND_DISTRIBUTE = 1 << 0, GF_OMP_FOR_KIND_SIMD = 2 << 0, GF_OMP_FOR_KIND_CILKSIMD = 3 << 0, GF_OMP_FOR_COMBINED = 1 << 2, GF_OMP_FOR_COMBINED_INTO = 1 << 3, GF_OMP_TARGET_KIND_MASK = 3 << 0, GF_OMP_TARGET_KIND_REGION = 0 << 0, GF_OMP_TARGET_KIND_DATA = 1 << 0, GF_OMP_TARGET_KIND_UPDATE = 2 << 0, /* True on an GIMPLE_OMP_RETURN statement if the return does not require a thread synchronization via some sort of barrier. The exact barrier that would otherwise be emitted is dependent on the OMP statement with which this return is associated. */ GF_OMP_RETURN_NOWAIT = 1 << 0, GF_OMP_SECTION_LAST = 1 << 0, GF_OMP_ATOMIC_NEED_VALUE = 1 << 0, GF_OMP_ATOMIC_SEQ_CST = 1 << 1, GF_PREDICT_TAKEN = 1 << 15 }; /* Currently, there are only two types of gimple debug stmt. 
   Others are envisioned, for example, to enable the generation of
   is_stmt notes in line number information, to mark sequence points,
   etc.  This subcode is to be used to tell them apart.  */
enum gimple_debug_subcode {
  GIMPLE_DEBUG_BIND = 0,
  GIMPLE_DEBUG_SOURCE_BIND = 1
};

/* Masks for selecting a pass local flag (PLF) to work on.  These
   masks are used by gimple_set_plf and gimple_plf.  */
enum plf_mask {
    GF_PLF_1	= 1 << 0,
    GF_PLF_2	= 1 << 1
};

/* Data structure definitions for GIMPLE tuples.  NOTE: word markers
   are for 64 bit hosts.  */

struct GTY((desc ("gimple_statement_structure (&%h)"), tag ("GSS_BASE"),
	    chain_next ("%h.next"), variable_size))
  gimple_statement_base
{
  /* [ WORD 1 ]
     Main identifying code for a tuple.  */
  ENUM_BITFIELD(gimple_code) code : 8;

  /* Nonzero if a warning should not be emitted on this tuple.  */
  unsigned int no_warning	: 1;

  /* Nonzero if this tuple has been visited.  Passes are responsible
     for clearing this bit before using it.  */
  unsigned int visited		: 1;

  /* Nonzero if this tuple represents a non-temporal move.  */
  unsigned int nontemporal_move	: 1;

  /* Pass local flags.  These flags are free for any pass to use as
     they see fit.  Passes should not assume that these flags contain
     any useful value when the pass starts.  Any initial state that
     the pass requires should be set on entry to the pass.  See
     gimple_set_plf and gimple_plf for usage.  */
  unsigned int plf		: 2;

  /* Nonzero if this statement has been modified and needs to have its
     operands rescanned.  */
  unsigned modified 		: 1;

  /* Nonzero if this statement contains volatile operands.  */
  unsigned has_volatile_ops 	: 1;

  /* Padding to get subcode to 16 bit alignment.  */
  unsigned pad			: 1;

  /* The SUBCODE field can be used for tuple-specific flags for tuples
     that do not require subcodes.  Note that SUBCODE should be at
     least as wide as tree codes, as several tuples store tree codes
     in there.  */
  unsigned int subcode		: 16;

  /* UID of this statement.  This is used by passes that want to
     assign IDs to statements.  It must be assigned and used by each
     pass.  By default it should be assumed to contain garbage.  */
  unsigned uid;

  /* [ WORD 2 ]
     Locus information for debug info.  */
  location_t location;

  /* Number of operands in this tuple.  */
  unsigned num_ops;

  /* [ WORD 3 ]
     Basic block holding this statement.  */
  basic_block bb;

  /* [ WORD 4-5 ]
     Linked lists of gimple statements.  The next pointers form
     a NULL terminated list, the prev pointers are a cyclic list.
     A gimple statement is hence also a double-ended list of
     statements, with the pointer itself being the first element,
     and the prev pointer being the last.  */
  gimple next;
  gimple GTY((skip)) prev;
};


/* Base structure for tuples with operands.  */

/* This gimple subclass has no tag value.  */
struct GTY(())
  gimple_statement_with_ops_base : public gimple_statement_base
{
  /* [ WORD 1-6 ] : base class */

  /* [ WORD 7 ]
     SSA operand vectors.  NOTE: It should be possible to
     amalgamate these vectors with the operand vector OP.  However,
     the SSA operand vectors are organized differently and contain
     more information (like immediate use chaining).  */
  struct use_optype_d GTY((skip (""))) *use_ops;
};


/* Statements that take register operands.  */

struct GTY((tag("GSS_WITH_OPS")))
  gimple_statement_with_ops : public gimple_statement_with_ops_base
{
  /* [ WORD 1-7 ] : base class */

  /* [ WORD 8 ]
     Operand vector.  NOTE!  This must always be the last field
     of this structure.  In particular, this means that this
     structure cannot be embedded inside another one.  */
  tree GTY((length ("%h.num_ops"))) op[1];
};


/* Base for statements that take both memory and register operands.  */

struct GTY((tag("GSS_WITH_MEM_OPS_BASE")))
  gimple_statement_with_memory_ops_base : public gimple_statement_with_ops_base
{
  /* [ WORD 1-7 ] : base class */

  /* [ WORD 8-9 ]
     Virtual operands for this statement.  The GC will pick them
     up via the ssa_names array.  */
  tree GTY((skip (""))) vdef;
  tree GTY((skip (""))) vuse;
};


/* Statements that take both memory and register operands.
*/

struct GTY((tag("GSS_WITH_MEM_OPS")))
  gimple_statement_with_memory_ops :
    public gimple_statement_with_memory_ops_base
{
  /* [ WORD 1-9 ] : base class */

  /* [ WORD 10 ]
     Operand vector.  NOTE!  This must always be the last field
     of this structure.  In particular, this means that this
     structure cannot be embedded inside another one.  */
  tree GTY((length ("%h.num_ops"))) op[1];
};


/* Call statements that take both memory and register operands.  */

struct GTY((tag("GSS_CALL")))
  gimple_statement_call : public gimple_statement_with_memory_ops_base
{
  /* [ WORD 1-9 ] : base class */

  /* [ WORD 10-13 ]
     Points-to summaries of the call's effects.  */
  struct pt_solution call_used;
  struct pt_solution call_clobbered;

  /* [ WORD 14 ]
     Either the type of the callee or, when GF_CALL_INTERNAL is set
     in subcode, the internal function code.  */
  union GTY ((desc ("%1.subcode & GF_CALL_INTERNAL"))) {
    tree GTY ((tag ("0"))) fntype;
    enum internal_fn GTY ((tag ("GF_CALL_INTERNAL"))) internal_fn;
  } u;

  /* [ WORD 15 ]
     Operand vector.  NOTE!  This must always be the last field
     of this structure.  In particular, this means that this
     structure cannot be embedded inside another one.  */
  tree GTY((length ("%h.num_ops"))) op[1];
};


/* OpenMP statements (#pragma omp).  */

struct GTY((tag("GSS_OMP")))
  gimple_statement_omp : public gimple_statement_base
{
  /* [ WORD 1-6 ] : base class */

  /* [ WORD 7 ]  */
  gimple_seq body;
};


/* GIMPLE_BIND */

struct GTY((tag("GSS_BIND")))
  gimple_statement_bind : public gimple_statement_base
{
  /* [ WORD 1-6 ] : base class */

  /* [ WORD 7 ]
     Variables declared in this scope.  */
  tree vars;

  /* [ WORD 8 ]
     This is different than the BLOCK field in gimple_statement_base,
     which is analogous to TREE_BLOCK (i.e., the lexical block holding
     this statement).  This field is the equivalent of BIND_EXPR_BLOCK
     in tree land (i.e., the lexical scope defined by this bind).  See
     gimple-low.c.  */
  tree block;

  /* [ WORD 9 ]  */
  gimple_seq body;
};


/* GIMPLE_CATCH */

struct GTY((tag("GSS_CATCH")))
  gimple_statement_catch : public gimple_statement_base
{
  /* [ WORD 1-6 ] : base class */

  /* [ WORD 7 ]
     Types of exceptions this handler catches.  */
  tree types;

  /* [ WORD 8 ]
     Handler body.  */
  gimple_seq handler;
};


/* GIMPLE_EH_FILTER */

struct GTY((tag("GSS_EH_FILTER")))
  gimple_statement_eh_filter : public gimple_statement_base
{
  /* [ WORD 1-6 ] : base class */

  /* [ WORD 7 ]
     Filter types.  */
  tree types;

  /* [ WORD 8 ]
     Failure actions.  */
  gimple_seq failure;
};

/* GIMPLE_EH_ELSE */

struct GTY((tag("GSS_EH_ELSE")))
  gimple_statement_eh_else : public gimple_statement_base
{
  /* [ WORD 1-6 ] : base class */

  /* [ WORD 7,8 ]
     Body run on the normal path (N_BODY) and on the exception
     path (E_BODY).  */
  gimple_seq n_body, e_body;
};

/* GIMPLE_EH_MUST_NOT_THROW */

struct GTY((tag("GSS_EH_MNT")))
  gimple_statement_eh_mnt : public gimple_statement_base
{
  /* [ WORD 1-6 ] : base class */

  /* [ WORD 7 ]
     Abort function decl.  */
  tree fndecl;
};

/* GIMPLE_PHI */

struct GTY((tag("GSS_PHI")))
  gimple_statement_phi : public gimple_statement_base
{
  /* [ WORD 1-6 ] : base class */

  /* [ WORD 7 ]
     Argument slots allocated (CAPACITY) and in use (NARGS).  */
  unsigned capacity;
  unsigned nargs;

  /* [ WORD 8 ]
     SSA name defined by this PHI.  */
  tree result;

  /* [ WORD 9 ]
     One argument per incoming edge; must remain the last field.  */
  struct phi_arg_d GTY ((length ("%h.nargs"))) args[1];
};


/* GIMPLE_RESX, GIMPLE_EH_DISPATCH */

struct GTY((tag("GSS_EH_CTRL")))
  gimple_statement_eh_ctrl : public gimple_statement_base
{
  /* [ WORD 1-6 ] : base class */

  /* [ WORD 7 ]
     Exception region number.  */
  int region;
};

struct GTY((tag("GSS_EH_CTRL")))
  gimple_statement_resx : public gimple_statement_eh_ctrl
{
  /* No extra fields; adds invariant:
       stmt->code == GIMPLE_RESX.  */
};

struct GTY((tag("GSS_EH_CTRL")))
  gimple_statement_eh_dispatch : public gimple_statement_eh_ctrl
{
  /* No extra fields; adds invariant:
       stmt->code == GIMPLE_EH_DISPATCH.  */
};


/* GIMPLE_TRY */

struct GTY((tag("GSS_TRY")))
  gimple_statement_try : public gimple_statement_base
{
  /* [ WORD 1-6 ] : base class */

  /* [ WORD 7 ]
     Expression to evaluate.  */
  gimple_seq eval;

  /* [ WORD 8 ]
     Cleanup expression.  */
  gimple_seq cleanup;
};

/* Kind of GIMPLE_TRY statements.  */
enum gimple_try_flags
{
  /* A try/catch.  */
  GIMPLE_TRY_CATCH = 1 << 0,

  /* A try/finally.  */
  GIMPLE_TRY_FINALLY = 1 << 1,
  GIMPLE_TRY_KIND = GIMPLE_TRY_CATCH | GIMPLE_TRY_FINALLY,

  /* Analogous to TRY_CATCH_IS_CLEANUP.  */
  GIMPLE_TRY_CATCH_IS_CLEANUP = 1 << 2
};

/* GIMPLE_WITH_CLEANUP_EXPR */

struct GTY((tag("GSS_WCE")))
  gimple_statement_wce : public gimple_statement_base
{
  /* [ WORD 1-6 ] : base class */

  /* Subcode: CLEANUP_EH_ONLY.  True if the cleanup should only be
	      executed if an exception is thrown, not on normal exit of its
	      scope.  This flag is analogous to the CLEANUP_EH_ONLY flag
	      in TARGET_EXPRs.  */

  /* [ WORD 7 ]
     Cleanup expression.  */
  gimple_seq cleanup;
};


/* GIMPLE_ASM */

struct GTY((tag("GSS_ASM")))
  gimple_statement_asm : public gimple_statement_with_memory_ops_base
{
  /* [ WORD 1-9 ] : base class */

  /* [ WORD 10 ]
     __asm__ statement.  */
  const char *string;

  /* [ WORD 11 ]
     Number of inputs, outputs, clobbers, labels.  */
  unsigned char ni;
  unsigned char no;
  unsigned char nc;
  unsigned char nl;

  /* [ WORD 12 ]
     Operand vector.  NOTE!  This must always be the last field
     of this structure.  In particular, this means that this
     structure cannot be embedded inside another one.  */
  tree GTY((length ("%h.num_ops"))) op[1];
};

/* GIMPLE_OMP_CRITICAL */

struct GTY((tag("GSS_OMP_CRITICAL")))
  gimple_statement_omp_critical : public gimple_statement_omp
{
  /* [ WORD 1-7 ] : base class */

  /* [ WORD 8 ]
     Critical section name.  */
  tree name;
};


/* One collapsed loop dimension of a GIMPLE_OMP_FOR.  */
struct GTY(()) gimple_omp_for_iter {
  /* Condition code.  */
  enum tree_code cond;

  /* Index variable.  */
  tree index;

  /* Initial value.  */
  tree initial;

  /* Final value.  */
  tree final;

  /* Increment.  */
  tree incr;
};

/* GIMPLE_OMP_FOR */

struct GTY((tag("GSS_OMP_FOR")))
  gimple_statement_omp_for : public gimple_statement_omp
{
  /* [ WORD 1-7 ] : base class */

  /* [ WORD 8 ]  */
  tree clauses;

  /* [ WORD 9 ]
     Number of elements in iter array.  */
  size_t collapse;

  /* [ WORD 10 ]  */
  struct gimple_omp_for_iter * GTY((length ("%h.collapse"))) iter;

  /* [ WORD 11 ]
     Pre-body evaluated before the loop body begins.  */
  gimple_seq pre_body;
};


/* GIMPLE_OMP_PARALLEL, GIMPLE_OMP_TARGET */
struct GTY((tag("GSS_OMP_PARALLEL_LAYOUT")))
  gimple_statement_omp_parallel_layout : public gimple_statement_omp
{
  /* [ WORD 1-7 ] : base class */

  /* [ WORD 8 ]
     Clauses.  */
  tree clauses;

  /* [ WORD 9 ]
     Child function holding the body of the parallel region.  */
  tree child_fn;

  /* [ WORD 10 ]
     Shared data argument.  */
  tree data_arg;
};

/* GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK */
struct GTY((tag("GSS_OMP_PARALLEL_LAYOUT")))
  gimple_statement_omp_taskreg : public gimple_statement_omp_parallel_layout
{
  /* No extra fields; adds invariant:
       stmt->code == GIMPLE_OMP_PARALLEL
       || stmt->code == GIMPLE_OMP_TASK.  */
};


/* GIMPLE_OMP_PARALLEL */
struct GTY((tag("GSS_OMP_PARALLEL_LAYOUT")))
  gimple_statement_omp_parallel : public gimple_statement_omp_taskreg
{
  /* No extra fields; adds invariant:
       stmt->code == GIMPLE_OMP_PARALLEL.  */
};


struct GTY((tag("GSS_OMP_PARALLEL_LAYOUT")))
  gimple_statement_omp_target : public gimple_statement_omp_parallel_layout
{
  /* No extra fields; adds invariant:
       stmt->code == GIMPLE_OMP_TARGET.  */
};

/* GIMPLE_OMP_TASK */

struct GTY((tag("GSS_OMP_TASK")))
  gimple_statement_omp_task : public gimple_statement_omp_taskreg
{
  /* [ WORD 1-10 ] : base class */

  /* [ WORD 11 ]
     Child function holding firstprivate initialization if needed.  */
  tree copy_fn;

  /* [ WORD 12-13 ]
     Size and alignment in bytes of the argument data block.  */
  tree arg_size;
  tree arg_align;
};


/* GIMPLE_OMP_SECTION */
/* Uses struct gimple_statement_omp.  */

/* GIMPLE_OMP_SECTIONS */

struct GTY((tag("GSS_OMP_SECTIONS")))
  gimple_statement_omp_sections : public gimple_statement_omp
{
  /* [ WORD 1-7 ] : base class */

  /* [ WORD 8 ]  */
  tree clauses;

  /* [ WORD 9 ]
     The control variable used for deciding which of the sections to
     execute.  */
  tree control;
};

/* GIMPLE_OMP_CONTINUE.

   Note: This does not inherit from gimple_statement_omp, because we
         do not need the body field.  */

struct GTY((tag("GSS_OMP_CONTINUE")))
  gimple_statement_omp_continue : public gimple_statement_base
{
  /* [ WORD 1-6 ] : base class */

  /* [ WORD 7 ]  */
  tree control_def;

  /* [ WORD 8 ]  */
  tree control_use;
};

/* GIMPLE_OMP_SINGLE, GIMPLE_OMP_TEAMS */

struct GTY((tag("GSS_OMP_SINGLE_LAYOUT")))
  gimple_statement_omp_single_layout : public gimple_statement_omp
{
  /* [ WORD 1-7 ] : base class */

  /* [ WORD 8 ]  */
  tree clauses;
};

struct GTY((tag("GSS_OMP_SINGLE_LAYOUT")))
  gimple_statement_omp_single : public gimple_statement_omp_single_layout
{
  /* No extra fields; adds invariant:
       stmt->code == GIMPLE_OMP_SINGLE.  */
};

struct GTY((tag("GSS_OMP_SINGLE_LAYOUT")))
  gimple_statement_omp_teams : public gimple_statement_omp_single_layout
{
  /* No extra fields; adds invariant:
       stmt->code == GIMPLE_OMP_TEAMS.  */
};


/* GIMPLE_OMP_ATOMIC_LOAD.
   Note: This is based on gimple_statement_base, not g_s_omp, because g_s_omp
   contains a sequence, which we don't need here.  */

struct GTY((tag("GSS_OMP_ATOMIC_LOAD")))
  gimple_statement_omp_atomic_load : public gimple_statement_base
{
  /* [ WORD 1-6 ] : base class */

  /* [ WORD 7-8 ]  */
  tree rhs, lhs;
};

/* GIMPLE_OMP_ATOMIC_STORE.
   See note on GIMPLE_OMP_ATOMIC_LOAD.  */

struct GTY((tag("GSS_OMP_ATOMIC_STORE_LAYOUT")))
  gimple_statement_omp_atomic_store_layout : public gimple_statement_base
{
  /* [ WORD 1-6 ] : base class */

  /* [ WORD 7 ]  */
  tree val;
};

struct GTY((tag("GSS_OMP_ATOMIC_STORE_LAYOUT")))
  gimple_statement_omp_atomic_store :
    public gimple_statement_omp_atomic_store_layout
{
  /* No extra fields; adds invariant:
       stmt->code == GIMPLE_OMP_ATOMIC_STORE.  */
};

struct GTY((tag("GSS_OMP_ATOMIC_STORE_LAYOUT")))
  gimple_statement_omp_return :
    public gimple_statement_omp_atomic_store_layout
{
  /* No extra fields; adds invariant:
       stmt->code == GIMPLE_OMP_RETURN.  */
};

/* GIMPLE_TRANSACTION.
*/

/* Bits to be stored in the GIMPLE_TRANSACTION subcode.  */

/* The __transaction_atomic was declared [[outer]] or it is
   __transaction_relaxed.  */
#define GTMA_IS_OUTER			(1u << 0)
#define GTMA_IS_RELAXED			(1u << 1)
#define GTMA_DECLARATION_MASK		(GTMA_IS_OUTER | GTMA_IS_RELAXED)

/* The transaction is seen to not have an abort.  */
#define GTMA_HAVE_ABORT			(1u << 2)
/* The transaction is seen to have loads or stores.  */
#define GTMA_HAVE_LOAD			(1u << 3)
#define GTMA_HAVE_STORE			(1u << 4)
/* The transaction MAY enter serial irrevocable mode in its dynamic scope.  */
#define GTMA_MAY_ENTER_IRREVOCABLE	(1u << 5)
/* The transaction WILL enter serial irrevocable mode.
   An irrevocable block post-dominates the entire transaction, such
   that all invocations of the transaction will go serial-irrevocable.
   In such case, we don't bother instrumenting the transaction, and
   tell the runtime that it should begin the transaction in
   serial-irrevocable mode.  */
#define GTMA_DOES_GO_IRREVOCABLE	(1u << 6)
/* The transaction contains no instrumentation code whatsoever, most
   likely because it is guaranteed to go irrevocable upon entry.  */
#define GTMA_HAS_NO_INSTRUMENTATION	(1u << 7)

struct GTY((tag("GSS_TRANSACTION")))
  gimple_statement_transaction : public gimple_statement_with_memory_ops_base
{
  /* [ WORD 1-9 ] : base class */

  /* [ WORD 10 ] */
  gimple_seq body;

  /* [ WORD 11 ] */
  tree label;
};

/* Tuple layout (GSS) codes, generated from gsstruct.def.  */
#define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP) SYM,
enum gimple_statement_structure_enum {
#include "gsstruct.def"
    LAST_GSS_ENUM
};
#undef DEFGSSTRUCT

/* is_a_helper specializations so that is_a/as_a/dyn_cast can downcast
   a gimple to its layout-specific subclass.  Each test just checks the
   statement code.  */

template <> template <> inline bool
is_a_helper <gimple_statement_asm>::test (gimple gs)
{ return gs->code == GIMPLE_ASM; }

template <> template <> inline bool
is_a_helper <gimple_statement_bind>::test (gimple gs)
{ return gs->code == GIMPLE_BIND; }

template <> template <> inline bool
is_a_helper <gimple_statement_call>::test (gimple gs)
{ return gs->code == GIMPLE_CALL; }

template <> template <> inline bool
is_a_helper <gimple_statement_catch>::test (gimple gs)
{ return gs->code == GIMPLE_CATCH; }

template <> template <> inline bool
is_a_helper <gimple_statement_resx>::test (gimple gs)
{ return gs->code == GIMPLE_RESX; }

template <> template <> inline bool
is_a_helper <gimple_statement_eh_dispatch>::test (gimple gs)
{ return gs->code == GIMPLE_EH_DISPATCH; }

template <> template <> inline bool
is_a_helper <gimple_statement_eh_else>::test (gimple gs)
{ return gs->code == GIMPLE_EH_ELSE; }

template <> template <> inline bool
is_a_helper <gimple_statement_eh_filter>::test (gimple gs)
{ return gs->code == GIMPLE_EH_FILTER; }

template <> template <> inline bool
is_a_helper <gimple_statement_eh_mnt>::test (gimple gs)
{ return gs->code == GIMPLE_EH_MUST_NOT_THROW; }

template <> template <> inline bool
is_a_helper <gimple_statement_omp_atomic_load>::test (gimple gs)
{ return gs->code == GIMPLE_OMP_ATOMIC_LOAD; }

template <> template <> inline bool
is_a_helper <gimple_statement_omp_atomic_store>::test (gimple gs)
{ return gs->code == GIMPLE_OMP_ATOMIC_STORE; }

template <> template <> inline bool
is_a_helper <gimple_statement_omp_return>::test (gimple gs)
{ return gs->code == GIMPLE_OMP_RETURN; }

template <> template <> inline bool
is_a_helper <gimple_statement_omp_continue>::test (gimple gs)
{ return gs->code == GIMPLE_OMP_CONTINUE; }

template <> template <> inline bool
is_a_helper <gimple_statement_omp_critical>::test (gimple gs)
{ return gs->code == GIMPLE_OMP_CRITICAL; }

template <> template <> inline bool
is_a_helper <gimple_statement_omp_for>::test (gimple gs)
{ return gs->code == GIMPLE_OMP_FOR; }

template <> template <> inline bool
is_a_helper <gimple_statement_omp_taskreg>::test (gimple gs)
{ return gs->code == GIMPLE_OMP_PARALLEL || gs->code == GIMPLE_OMP_TASK; }

template <> template <> inline bool
is_a_helper <gimple_statement_omp_parallel>::test (gimple gs)
{ return gs->code == GIMPLE_OMP_PARALLEL; }

template <> template <> inline bool
is_a_helper <gimple_statement_omp_target>::test (gimple gs)
{ return gs->code == GIMPLE_OMP_TARGET; }

template <> template <> inline bool
is_a_helper <gimple_statement_omp_sections>::test (gimple gs)
{ return gs->code == GIMPLE_OMP_SECTIONS; }

template <> template <> inline bool
is_a_helper <gimple_statement_omp_single>::test (gimple gs)
{ return gs->code == GIMPLE_OMP_SINGLE; }

template <> template <> inline bool
is_a_helper <gimple_statement_omp_teams>::test (gimple gs)
{ return gs->code == GIMPLE_OMP_TEAMS; }

template <> template <> inline bool
is_a_helper <gimple_statement_omp_task>::test (gimple gs)
{ return gs->code == GIMPLE_OMP_TASK; }

template <> template <> inline bool
is_a_helper <gimple_statement_phi>::test (gimple gs)
{ return gs->code == GIMPLE_PHI; }

template <> template <> inline bool
is_a_helper <gimple_statement_transaction>::test (gimple gs)
{ return gs->code == GIMPLE_TRANSACTION; }

template <> template <> inline bool
is_a_helper <gimple_statement_try>::test (gimple gs)
{ return gs->code == GIMPLE_TRY; }

template <> template <> inline bool
is_a_helper <gimple_statement_wce>::test (gimple gs)
{ return gs->code == GIMPLE_WITH_CLEANUP_EXPR; }

/* Same tests for const_gimple.  Note: fewer layouts have const
   variants than non-const ones.  */

template <> template <> inline bool
is_a_helper <const gimple_statement_asm>::test (const_gimple gs)
{ return gs->code == GIMPLE_ASM; }

template <> template <> inline bool
is_a_helper <const gimple_statement_bind>::test (const_gimple gs)
{ return gs->code == GIMPLE_BIND; }

template <> template <> inline bool
is_a_helper <const gimple_statement_call>::test (const_gimple gs)
{ return gs->code == GIMPLE_CALL; }

template <> template <> inline bool
is_a_helper <const gimple_statement_catch>::test (const_gimple gs)
{ return gs->code == GIMPLE_CATCH; }

template <> template <> inline bool
is_a_helper <const gimple_statement_resx>::test (const_gimple gs)
{ return gs->code == GIMPLE_RESX; }

template <> template <> inline bool
is_a_helper <const gimple_statement_eh_dispatch>::test (const_gimple gs)
{ return gs->code == GIMPLE_EH_DISPATCH; }

template <> template <> inline bool
is_a_helper <const gimple_statement_eh_filter>::test (const_gimple gs)
{ return gs->code == GIMPLE_EH_FILTER; }

template <> template <> inline bool
is_a_helper <const gimple_statement_omp_atomic_load>::test (const_gimple gs)
{ return gs->code == GIMPLE_OMP_ATOMIC_LOAD; }

template <> template <> inline bool
is_a_helper <const gimple_statement_omp_atomic_store>::test (const_gimple gs)
{ return gs->code == GIMPLE_OMP_ATOMIC_STORE; }

template <> template <> inline bool
is_a_helper <const gimple_statement_omp_return>::test (const_gimple gs)
{ return gs->code == GIMPLE_OMP_RETURN; }

template <> template <> inline bool
is_a_helper <const gimple_statement_omp_continue>::test (const_gimple gs)
{ return gs->code == GIMPLE_OMP_CONTINUE; }

template <> template <> inline bool
is_a_helper <const gimple_statement_omp_critical>::test (const_gimple gs)
{ return gs->code == GIMPLE_OMP_CRITICAL; }

template <> template <> inline bool
is_a_helper <const gimple_statement_omp_for>::test (const_gimple gs)
{ return gs->code == GIMPLE_OMP_FOR; }

template <> template <> inline bool
is_a_helper <const gimple_statement_omp_taskreg>::test (const_gimple gs)
{ return gs->code == GIMPLE_OMP_PARALLEL || gs->code == GIMPLE_OMP_TASK; }

template <> template <> inline bool
is_a_helper <const gimple_statement_omp_parallel>::test (const_gimple gs)
{ return gs->code == GIMPLE_OMP_PARALLEL; }

template <> template <> inline bool
is_a_helper <const gimple_statement_omp_target>::test (const_gimple gs)
{ return gs->code == GIMPLE_OMP_TARGET; }

template <> template <> inline bool
is_a_helper <const gimple_statement_omp_sections>::test (const_gimple gs)
{ return gs->code == GIMPLE_OMP_SECTIONS; }

template <> template <> inline bool
is_a_helper <const gimple_statement_omp_single>::test (const_gimple gs)
{ return gs->code == GIMPLE_OMP_SINGLE; }

template <> template <> inline bool
is_a_helper <const gimple_statement_omp_teams>::test (const_gimple gs)
{ return gs->code == GIMPLE_OMP_TEAMS; }

template <> template <> inline bool
is_a_helper <const gimple_statement_omp_task>::test (const_gimple gs)
{ return gs->code == GIMPLE_OMP_TASK; }

template <> template <> inline bool
is_a_helper <const gimple_statement_phi>::test (const_gimple gs)
{ return gs->code == GIMPLE_PHI; }

template <> template <> inline bool
is_a_helper <const gimple_statement_transaction>::test (const_gimple gs)
{ return gs->code == GIMPLE_TRANSACTION; }

/* Offset in bytes to the location of the operand vector.
   Zero if there is no operand vector for this tuple structure.  */
extern size_t const gimple_ops_offset_[];

/* Map GIMPLE codes to GSS codes.  */
extern enum gimple_statement_structure_enum const gss_for_code_[];

/* This variable holds the currently expanded gimple statement for purposes
   of communicating the profile info to the builtin expanders.
*/ extern gimple currently_expanding_gimple_stmt; #define gimple_alloc(c, n) gimple_alloc_stat (c, n MEM_STAT_INFO) gimple gimple_alloc_stat (enum gimple_code, unsigned MEM_STAT_DECL); gimple gimple_build_return (tree); void gimple_call_reset_alias_info (gimple); gimple gimple_build_call_vec (tree, vec<tree> ); gimple gimple_build_call (tree, unsigned, ...); gimple gimple_build_call_valist (tree, unsigned, va_list); gimple gimple_build_call_internal (enum internal_fn, unsigned, ...); gimple gimple_build_call_internal_vec (enum internal_fn, vec<tree> ); gimple gimple_build_call_from_tree (tree); gimple gimple_build_assign_stat (tree, tree MEM_STAT_DECL); #define gimple_build_assign(l,r) gimple_build_assign_stat (l, r MEM_STAT_INFO) gimple gimple_build_assign_with_ops (enum tree_code, tree, tree, tree, tree CXX_MEM_STAT_INFO); gimple gimple_build_assign_with_ops (enum tree_code, tree, tree, tree CXX_MEM_STAT_INFO); gimple gimple_build_cond (enum tree_code, tree, tree, tree, tree); gimple gimple_build_cond_from_tree (tree, tree, tree); void gimple_cond_set_condition_from_tree (gimple, tree); gimple gimple_build_label (tree label); gimple gimple_build_goto (tree dest); gimple gimple_build_nop (void); gimple gimple_build_bind (tree, gimple_seq, tree); gimple gimple_build_asm_vec (const char *, vec<tree, va_gc> *, vec<tree, va_gc> *, vec<tree, va_gc> *, vec<tree, va_gc> *); gimple gimple_build_catch (tree, gimple_seq); gimple gimple_build_eh_filter (tree, gimple_seq); gimple gimple_build_eh_must_not_throw (tree); gimple gimple_build_eh_else (gimple_seq, gimple_seq); gimple_statement_try *gimple_build_try (gimple_seq, gimple_seq, enum gimple_try_flags); gimple gimple_build_wce (gimple_seq); gimple gimple_build_resx (int); gimple gimple_build_switch_nlabels (unsigned, tree, tree); gimple gimple_build_switch (tree, tree, vec<tree> ); gimple gimple_build_eh_dispatch (int); gimple gimple_build_debug_bind_stat (tree, tree, gimple MEM_STAT_DECL); #define 
gimple_build_debug_bind(var,val,stmt) \ gimple_build_debug_bind_stat ((var), (val), (stmt) MEM_STAT_INFO) gimple gimple_build_debug_source_bind_stat (tree, tree, gimple MEM_STAT_DECL); #define gimple_build_debug_source_bind(var,val,stmt) \ gimple_build_debug_source_bind_stat ((var), (val), (stmt) MEM_STAT_INFO) gimple gimple_build_omp_critical (gimple_seq, tree); gimple gimple_build_omp_for (gimple_seq, int, tree, size_t, gimple_seq); gimple gimple_build_omp_parallel (gimple_seq, tree, tree, tree); gimple gimple_build_omp_task (gimple_seq, tree, tree, tree, tree, tree, tree); gimple gimple_build_omp_section (gimple_seq); gimple gimple_build_omp_master (gimple_seq); gimple gimple_build_omp_taskgroup (gimple_seq); gimple gimple_build_omp_continue (tree, tree); gimple gimple_build_omp_ordered (gimple_seq); gimple gimple_build_omp_return (bool); gimple gimple_build_omp_sections (gimple_seq, tree); gimple gimple_build_omp_sections_switch (void); gimple gimple_build_omp_single (gimple_seq, tree); gimple gimple_build_omp_target (gimple_seq, int, tree); gimple gimple_build_omp_teams (gimple_seq, tree); gimple gimple_build_omp_atomic_load (tree, tree); gimple gimple_build_omp_atomic_store (tree); gimple gimple_build_transaction (gimple_seq, tree); gimple gimple_build_predict (enum br_predictor, enum prediction); extern void gimple_seq_add_stmt (gimple_seq *, gimple); extern void gimple_seq_add_stmt_without_update (gimple_seq *, gimple); void gimple_seq_add_seq (gimple_seq *, gimple_seq); extern void annotate_all_with_location_after (gimple_seq, gimple_stmt_iterator, location_t); extern void annotate_all_with_location (gimple_seq, location_t); bool empty_body_p (gimple_seq); gimple_seq gimple_seq_copy (gimple_seq); bool gimple_call_same_target_p (const_gimple, const_gimple); int gimple_call_flags (const_gimple); int gimple_call_arg_flags (const_gimple, unsigned); int gimple_call_return_flags (const_gimple); bool gimple_assign_copy_p (gimple); bool 
gimple_assign_ssa_name_copy_p (gimple); bool gimple_assign_unary_nop_p (gimple); void gimple_set_bb (gimple, basic_block); void gimple_assign_set_rhs_from_tree (gimple_stmt_iterator *, tree); void gimple_assign_set_rhs_with_ops_1 (gimple_stmt_iterator *, enum tree_code, tree, tree, tree); tree gimple_get_lhs (const_gimple); void gimple_set_lhs (gimple, tree); gimple gimple_copy (gimple); bool gimple_has_side_effects (const_gimple); bool gimple_could_trap_p_1 (gimple, bool, bool); bool gimple_could_trap_p (gimple); bool gimple_assign_rhs_could_trap_p (gimple); extern void dump_gimple_statistics (void); unsigned get_gimple_rhs_num_ops (enum tree_code); extern tree canonicalize_cond_expr_cond (tree); gimple gimple_call_copy_skip_args (gimple, bitmap); extern bool gimple_compare_field_offset (tree, tree); extern tree gimple_unsigned_type (tree); extern tree gimple_signed_type (tree); extern alias_set_type gimple_get_alias_set (tree); extern bool gimple_ior_addresses_taken (bitmap, gimple); extern bool gimple_builtin_call_types_compatible_p (gimple, tree); extern bool gimple_call_builtin_p (gimple); extern bool gimple_call_builtin_p (gimple, enum built_in_class); extern bool gimple_call_builtin_p (gimple, enum built_in_function); extern bool gimple_asm_clobbers_memory_p (const_gimple); extern void dump_decl_set (FILE *, bitmap); extern bool nonfreeing_call_p (gimple); extern bool infer_nonnull_range (gimple, tree, bool, bool); extern void sort_case_labels (vec<tree> ); extern void preprocess_case_label_vec_for_gimple (vec<tree> , tree, tree *); extern void gimple_seq_set_location (gimple_seq , location_t); /* Formal (expression) temporary table handling: multiple occurrences of the same scalar expression are evaluated into the same temporary. */ typedef struct gimple_temp_hash_elt { tree val; /* Key */ tree temp; /* Value */ } elt_t; /* Get the number of the next statement uid to be allocated. 
*/ static inline unsigned int gimple_stmt_max_uid (struct function *fn) { return fn->last_stmt_uid; } /* Set the number of the next statement uid to be allocated. */ static inline void set_gimple_stmt_max_uid (struct function *fn, unsigned int maxid) { fn->last_stmt_uid = maxid; } /* Set the number of the next statement uid to be allocated. */ static inline unsigned int inc_gimple_stmt_max_uid (struct function *fn) { return fn->last_stmt_uid++; } /* Return the first node in GIMPLE sequence S. */ static inline gimple_seq_node gimple_seq_first (gimple_seq s) { return s; } /* Return the first statement in GIMPLE sequence S. */ static inline gimple gimple_seq_first_stmt (gimple_seq s) { gimple_seq_node n = gimple_seq_first (s); return n; } /* Return the last node in GIMPLE sequence S. */ static inline gimple_seq_node gimple_seq_last (gimple_seq s) { return s ? s->prev : NULL; } /* Return the last statement in GIMPLE sequence S. */ static inline gimple gimple_seq_last_stmt (gimple_seq s) { gimple_seq_node n = gimple_seq_last (s); return n; } /* Set the last node in GIMPLE sequence *PS to LAST. */ static inline void gimple_seq_set_last (gimple_seq *ps, gimple_seq_node last) { (*ps)->prev = last; } /* Set the first node in GIMPLE sequence *PS to FIRST. */ static inline void gimple_seq_set_first (gimple_seq *ps, gimple_seq_node first) { *ps = first; } /* Return true if GIMPLE sequence S is empty. */ static inline bool gimple_seq_empty_p (gimple_seq s) { return s == NULL; } /* Allocate a new sequence and initialize its first element with STMT. */ static inline gimple_seq gimple_seq_alloc_with_stmt (gimple stmt) { gimple_seq seq = NULL; gimple_seq_add_stmt (&seq, stmt); return seq; } /* Returns the sequence of statements in BB. */ static inline gimple_seq bb_seq (const_basic_block bb) { return (!(bb->flags & BB_RTL)) ? bb->il.gimple.seq : NULL; } static inline gimple_seq * bb_seq_addr (basic_block bb) { return (!(bb->flags & BB_RTL)) ? 
&bb->il.gimple.seq : NULL; } /* Sets the sequence of statements in BB to SEQ. */ static inline void set_bb_seq (basic_block bb, gimple_seq seq) { gcc_checking_assert (!(bb->flags & BB_RTL)); bb->il.gimple.seq = seq; } /* Return the code for GIMPLE statement G. */ static inline enum gimple_code gimple_code (const_gimple g) { return g->code; } /* Return the GSS code used by a GIMPLE code. */ static inline enum gimple_statement_structure_enum gss_for_code (enum gimple_code code) { gcc_gimple_checking_assert ((unsigned int)code < LAST_AND_UNUSED_GIMPLE_CODE); return gss_for_code_[code]; } /* Return which GSS code is used by GS. */ static inline enum gimple_statement_structure_enum gimple_statement_structure (gimple gs) { return gss_for_code (gimple_code (gs)); } /* Return true if statement G has sub-statements. This is only true for High GIMPLE statements. */ static inline bool gimple_has_substatements (gimple g) { switch (gimple_code (g)) { case GIMPLE_BIND: case GIMPLE_CATCH: case GIMPLE_EH_FILTER: case GIMPLE_EH_ELSE: case GIMPLE_TRY: case GIMPLE_OMP_FOR: case GIMPLE_OMP_MASTER: case GIMPLE_OMP_TASKGROUP: case GIMPLE_OMP_ORDERED: case GIMPLE_OMP_SECTION: case GIMPLE_OMP_PARALLEL: case GIMPLE_OMP_TASK: case GIMPLE_OMP_SECTIONS: case GIMPLE_OMP_SINGLE: case GIMPLE_OMP_TARGET: case GIMPLE_OMP_TEAMS: case GIMPLE_OMP_CRITICAL: case GIMPLE_WITH_CLEANUP_EXPR: case GIMPLE_TRANSACTION: return true; default: return false; } } /* Return the basic block holding statement G. */ static inline basic_block gimple_bb (const_gimple g) { return g->bb; } /* Return the lexical scope block holding statement G. */ static inline tree gimple_block (const_gimple g) { return LOCATION_BLOCK (g->location); } /* Set BLOCK to be the lexical scope block holding statement G. 
*/ static inline void gimple_set_block (gimple g, tree block) { if (block) g->location = COMBINE_LOCATION_DATA (line_table, g->location, block); else g->location = LOCATION_LOCUS (g->location); } /* Return location information for statement G. */ static inline location_t gimple_location (const_gimple g) { return g->location; } /* Return pointer to location information for statement G. */ static inline const location_t * gimple_location_ptr (const_gimple g) { return &g->location; } /* Set location information for statement G. */ static inline void gimple_set_location (gimple g, location_t location) { g->location = location; } /* Return true if G contains location information. */ static inline bool gimple_has_location (const_gimple g) { return LOCATION_LOCUS (gimple_location (g)) != UNKNOWN_LOCATION; } /* Return the file name of the location of STMT. */ static inline const char * gimple_filename (const_gimple stmt) { return LOCATION_FILE (gimple_location (stmt)); } /* Return the line number of the location of STMT. */ static inline int gimple_lineno (const_gimple stmt) { return LOCATION_LINE (gimple_location (stmt)); } /* Determine whether SEQ is a singleton. */ static inline bool gimple_seq_singleton_p (gimple_seq seq) { return ((gimple_seq_first (seq) != NULL) && (gimple_seq_first (seq) == gimple_seq_last (seq))); } /* Return true if no warnings should be emitted for statement STMT. */ static inline bool gimple_no_warning_p (const_gimple stmt) { return stmt->no_warning; } /* Set the no_warning flag of STMT to NO_WARNING. */ static inline void gimple_set_no_warning (gimple stmt, bool no_warning) { stmt->no_warning = (unsigned) no_warning; } /* Set the visited status on statement STMT to VISITED_P. */ static inline void gimple_set_visited (gimple stmt, bool visited_p) { stmt->visited = (unsigned) visited_p; } /* Return the visited status for statement STMT. 
*/

static inline bool
gimple_visited_p (gimple stmt)
{
  return stmt->visited;
}

/* Set pass local flag PLF on statement STMT to VAL_P.  */

static inline void
gimple_set_plf (gimple stmt, enum plf_mask plf, bool val_p)
{
  if (val_p)
    stmt->plf |= (unsigned int) plf;
  else
    stmt->plf &= ~((unsigned int) plf);
}

/* Return the value of pass local flag PLF on statement STMT.
   Note: the raw masked bits are returned, not a normalized boolean.  */

static inline unsigned int
gimple_plf (gimple stmt, enum plf_mask plf)
{
  return stmt->plf & ((unsigned int) plf);
}

/* Set the UID of statement.  */

static inline void
gimple_set_uid (gimple g, unsigned uid)
{
  g->uid = uid;
}

/* Return the UID of statement.  */

static inline unsigned
gimple_uid (const_gimple g)
{
  return g->uid;
}

/* Make statement G a singleton sequence (G becomes its own predecessor
   and has no successor).  */

static inline void
gimple_init_singleton (gimple g)
{
  g->next = NULL;
  g->prev = g;
}

/* Return true if GIMPLE statement G has register or memory operands.  */

static inline bool
gimple_has_ops (const_gimple g)
{
  return gimple_code (g) >= GIMPLE_COND && gimple_code (g) <= GIMPLE_RETURN;
}

/* Specializations that let is_a, as_a and dyn_cast view a statement as
   a gimple_statement_with_ops when it carries an operand vector.  */

template <>
template <>
inline bool
is_a_helper <const gimple_statement_with_ops>::test (const_gimple gs)
{
  return gimple_has_ops (gs);
}

template <>
template <>
inline bool
is_a_helper <gimple_statement_with_ops>::test (gimple gs)
{
  return gimple_has_ops (gs);
}

/* Return true if GIMPLE statement G has memory operands.  */

static inline bool
gimple_has_mem_ops (const_gimple g)
{
  return gimple_code (g) >= GIMPLE_ASSIGN && gimple_code (g) <= GIMPLE_RETURN;
}

/* Likewise for statements with memory (virtual) operands.  */

template <>
template <>
inline bool
is_a_helper <const gimple_statement_with_memory_ops>::test (const_gimple gs)
{
  return gimple_has_mem_ops (gs);
}

template <>
template <>
inline bool
is_a_helper <gimple_statement_with_memory_ops>::test (gimple gs)
{
  return gimple_has_mem_ops (gs);
}

/* Return the set of USE operands for statement G.
*/

static inline struct use_optype_d *
gimple_use_ops (const_gimple g)
{
  const gimple_statement_with_ops *ops_stmt =
    dyn_cast <const gimple_statement_with_ops> (g);
  /* Statements without an operand vector have no USE operands.  */
  if (!ops_stmt)
    return NULL;
  return ops_stmt->use_ops;
}

/* Set USE to be the set of USE operands for statement G.  */

static inline void
gimple_set_use_ops (gimple g, struct use_optype_d *use)
{
  gimple_statement_with_ops *ops_stmt =
    as_a <gimple_statement_with_ops> (g);
  ops_stmt->use_ops = use;
}

/* Return the single VUSE operand of the statement G, or NULL_TREE if
   G has no memory operands.  */

static inline tree
gimple_vuse (const_gimple g)
{
  const gimple_statement_with_memory_ops *mem_ops_stmt =
     dyn_cast <const gimple_statement_with_memory_ops> (g);
  if (!mem_ops_stmt)
    return NULL_TREE;
  return mem_ops_stmt->vuse;
}

/* Return the single VDEF operand of the statement G, or NULL_TREE if
   G has no memory operands.  */

static inline tree
gimple_vdef (const_gimple g)
{
  const gimple_statement_with_memory_ops *mem_ops_stmt =
     dyn_cast <const gimple_statement_with_memory_ops> (g);
  if (!mem_ops_stmt)
    return NULL_TREE;
  return mem_ops_stmt->vdef;
}

/* Return a pointer to the single VUSE operand of statement G, or NULL
   if G has no memory operands.  */

static inline tree *
gimple_vuse_ptr (gimple g)
{
  gimple_statement_with_memory_ops *mem_ops_stmt =
     dyn_cast <gimple_statement_with_memory_ops> (g);
  if (!mem_ops_stmt)
    return NULL;
  return &mem_ops_stmt->vuse;
}

/* Return a pointer to the single VDEF operand of statement G, or NULL
   if G has no memory operands.  */

static inline tree *
gimple_vdef_ptr (gimple g)
{
  gimple_statement_with_memory_ops *mem_ops_stmt =
     dyn_cast <gimple_statement_with_memory_ops> (g);
  if (!mem_ops_stmt)
    return NULL;
  return &mem_ops_stmt->vdef;
}

/* Set the single VUSE operand of the statement G.  */

static inline void
gimple_set_vuse (gimple g, tree vuse)
{
  gimple_statement_with_memory_ops *mem_ops_stmt =
    as_a <gimple_statement_with_memory_ops> (g);
  mem_ops_stmt->vuse = vuse;
}

/* Set the single VDEF operand of the statement G.
*/

static inline void
gimple_set_vdef (gimple g, tree vdef)
{
  gimple_statement_with_memory_ops *mem_ops_stmt =
    as_a <gimple_statement_with_memory_ops> (g);
  mem_ops_stmt->vdef = vdef;
}

/* Return true if statement G has operands and the modified field has
   been set.  */

static inline bool
gimple_modified_p (const_gimple g)
{
  return (gimple_has_ops (g)) ? (bool) g->modified : false;
}

/* Set the MODIFIED flag to MODIFIEDP, iff the gimple statement G has
   a MODIFIED field.  Statements without operands are left untouched.  */

static inline void
gimple_set_modified (gimple s, bool modifiedp)
{
  if (gimple_has_ops (s))
    s->modified = (unsigned) modifiedp;
}

/* Return the tree code for the expression computed by STMT.  This is
   only valid for GIMPLE_COND, GIMPLE_CALL and GIMPLE_ASSIGN.  For
   GIMPLE_CALL, return CALL_EXPR as the expression code for
   consistency.  This is useful when the caller needs to deal with the
   three kinds of computation that GIMPLE supports.  */

static inline enum tree_code
gimple_expr_code (const_gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);
  if (code == GIMPLE_ASSIGN || code == GIMPLE_COND)
    return (enum tree_code) stmt->subcode;
  else
    {
      /* Any other code must be a call; checked in checking builds.  */
      gcc_gimple_checking_assert (code == GIMPLE_CALL);
      return CALL_EXPR;
    }
}

/* Return true if statement STMT contains volatile operands.  */

static inline bool
gimple_has_volatile_ops (const_gimple stmt)
{
  if (gimple_has_mem_ops (stmt))
    return stmt->has_volatile_ops;
  else
    return false;
}

/* Set the HAS_VOLATILE_OPS flag to VOLATILEP.  No-op for statements
   without memory operands.  */

static inline void
gimple_set_has_volatile_ops (gimple stmt, bool volatilep)
{
  if (gimple_has_mem_ops (stmt))
    stmt->has_volatile_ops = (unsigned) volatilep;
}

/* Return true if STMT is in a transaction.  */

static inline bool
gimple_in_transaction (gimple stmt)
{
  return bb_in_transaction (gimple_bb (stmt));
}

/* Return true if statement STMT may access memory.  */

static inline bool
gimple_references_memory_p (gimple stmt)
{
  return gimple_has_mem_ops (stmt) && gimple_vuse (stmt);
}

/* Return the subcode for OMP statement S.
*/

static inline unsigned
gimple_omp_subcode (const_gimple s)
{
  /* Only valid for the OMP statement codes.  */
  gcc_gimple_checking_assert (gimple_code (s) >= GIMPLE_OMP_ATOMIC_LOAD
	      && gimple_code (s) <= GIMPLE_OMP_TEAMS);
  return s->subcode;
}

/* Set the subcode for OMP statement S to SUBCODE.  */

static inline void
gimple_omp_set_subcode (gimple s, unsigned int subcode)
{
  /* We only have 16 bits for the subcode.  Assert that we are not
     overflowing it.  */
  gcc_gimple_checking_assert (subcode < (1 << 16));
  s->subcode = subcode;
}

/* Set the nowait flag on OMP_RETURN statement S.  */

static inline void
gimple_omp_return_set_nowait (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_OMP_RETURN);
  s->subcode |= GF_OMP_RETURN_NOWAIT;
}

/* Return true if OMP return statement G has the GF_OMP_RETURN_NOWAIT
   flag set.  */

static inline bool
gimple_omp_return_nowait_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_RETURN);
  return (gimple_omp_subcode (g) & GF_OMP_RETURN_NOWAIT) != 0;
}

/* Set the LHS of OMP return.  */

static inline void
gimple_omp_return_set_lhs (gimple g, tree lhs)
{
  gimple_statement_omp_return *omp_return_stmt =
    as_a <gimple_statement_omp_return> (g);
  omp_return_stmt->val = lhs;
}

/* Get the LHS of OMP return.  */

static inline tree
gimple_omp_return_lhs (const_gimple g)
{
  const gimple_statement_omp_return *omp_return_stmt =
    as_a <const gimple_statement_omp_return> (g);
  return omp_return_stmt->val;
}

/* Return a pointer to the LHS of OMP return.  */

static inline tree *
gimple_omp_return_lhs_ptr (gimple g)
{
  gimple_statement_omp_return *omp_return_stmt =
    as_a <gimple_statement_omp_return> (g);
  return &omp_return_stmt->val;
}

/* Return true if OMP section statement G has the GF_OMP_SECTION_LAST
   flag set.  */

static inline bool
gimple_omp_section_last_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
  return (gimple_omp_subcode (g) & GF_OMP_SECTION_LAST) != 0;
}

/* Set the GF_OMP_SECTION_LAST flag on G.
*/

static inline void
gimple_omp_section_set_last (gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_SECTION);
  g->subcode |= GF_OMP_SECTION_LAST;
}

/* Return true if OMP parallel statement G has the
   GF_OMP_PARALLEL_COMBINED flag set.  */

static inline bool
gimple_omp_parallel_combined_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
  return (gimple_omp_subcode (g) & GF_OMP_PARALLEL_COMBINED) != 0;
}

/* Set the GF_OMP_PARALLEL_COMBINED field in G depending on the boolean
   value of COMBINED_P.  */

static inline void
gimple_omp_parallel_set_combined_p (gimple g, bool combined_p)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_PARALLEL);
  if (combined_p)
    g->subcode |= GF_OMP_PARALLEL_COMBINED;
  else
    g->subcode &= ~GF_OMP_PARALLEL_COMBINED;
}

/* Return true if OMP atomic load/store statement G has the
   GF_OMP_ATOMIC_NEED_VALUE flag set.  G must be either a
   GIMPLE_OMP_ATOMIC_LOAD or a GIMPLE_OMP_ATOMIC_STORE.  */

static inline bool
gimple_omp_atomic_need_value_p (const_gimple g)
{
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  return (gimple_omp_subcode (g) & GF_OMP_ATOMIC_NEED_VALUE) != 0;
}

/* Set the GF_OMP_ATOMIC_NEED_VALUE flag on G.  */

static inline void
gimple_omp_atomic_set_need_value (gimple g)
{
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  g->subcode |= GF_OMP_ATOMIC_NEED_VALUE;
}

/* Return true if OMP atomic load/store statement G has the
   GF_OMP_ATOMIC_SEQ_CST flag set.  */

static inline bool
gimple_omp_atomic_seq_cst_p (const_gimple g)
{
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  return (gimple_omp_subcode (g) & GF_OMP_ATOMIC_SEQ_CST) != 0;
}

/* Set the GF_OMP_ATOMIC_SEQ_CST flag on G.  */

static inline void
gimple_omp_atomic_set_seq_cst (gimple g)
{
  if (gimple_code (g) != GIMPLE_OMP_ATOMIC_LOAD)
    GIMPLE_CHECK (g, GIMPLE_OMP_ATOMIC_STORE);
  g->subcode |= GF_OMP_ATOMIC_SEQ_CST;
}

/* Return the number of operands for statement GS.
*/

static inline unsigned
gimple_num_ops (const_gimple gs)
{
  return gs->num_ops;
}

/* Set the number of operands for statement GS.  */

static inline void
gimple_set_num_ops (gimple gs, unsigned num_ops)
{
  gs->num_ops = num_ops;
}

/* Return the array of operands for statement GS.  */

static inline tree *
gimple_ops (gimple gs)
{
  size_t off;

  /* All the tuples have their operand vector at the very bottom of the
     structure.  Note that those structures that do not have an operand
     vector have a zero offset.  */
  off = gimple_ops_offset_[gimple_statement_structure (gs)];
  gcc_gimple_checking_assert (off != 0);

  return (tree *) ((char *) gs + off);
}

/* Return operand I for statement GS.  Returns NULL_TREE if GS has no
   operand vector.  */

static inline tree
gimple_op (const_gimple gs, unsigned i)
{
  if (gimple_has_ops (gs))
    {
      gcc_gimple_checking_assert (i < gimple_num_ops (gs));
      return gimple_ops (CONST_CAST_GIMPLE (gs))[i];
    }
  else
    return NULL_TREE;
}

/* Return a pointer to operand I for statement GS.  Returns NULL if GS
   has no operand vector.  */

static inline tree *
gimple_op_ptr (const_gimple gs, unsigned i)
{
  if (gimple_has_ops (gs))
    {
      gcc_gimple_checking_assert (i < gimple_num_ops (gs));
      return gimple_ops (CONST_CAST_GIMPLE (gs)) + i;
    }
  else
    return NULL;
}

/* Set operand I of statement GS to OP.  */

static inline void
gimple_set_op (gimple gs, unsigned i, tree op)
{
  gcc_gimple_checking_assert (gimple_has_ops (gs) && i < gimple_num_ops (gs));

  /* Note.  It may be tempting to assert that OP matches
     is_gimple_operand, but that would be wrong.  Different tuples
     accept slightly different sets of tree operands.  Each caller
     should perform its own validation.  */
  gimple_ops (gs)[i] = op;
}

/* Return true if GS is a GIMPLE_ASSIGN.  */

static inline bool
is_gimple_assign (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_ASSIGN;
}

/* Determine if expression CODE is one of the valid expressions that can
   be used on the RHS of GIMPLE assignments.
*/

static inline enum gimple_rhs_class
get_gimple_rhs_class (enum tree_code code)
{
  return (enum gimple_rhs_class) gimple_rhs_class_table[(int) code];
}

/* Return the LHS of assignment statement GS.  */

static inline tree
gimple_assign_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op (gs, 0);
}

/* Return a pointer to the LHS of assignment statement GS.  */

static inline tree *
gimple_assign_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 0);
}

/* Set LHS to be the LHS operand of assignment statement GS.  If LHS is
   an SSA name, its defining statement is updated to GS.  */

static inline void
gimple_assign_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 0, lhs);

  if (lhs && TREE_CODE (lhs) == SSA_NAME)
    SSA_NAME_DEF_STMT (lhs) = gs;
}

/* Return the first operand on the RHS of assignment statement GS.  */

static inline tree
gimple_assign_rhs1 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op (gs, 1);
}

/* Return a pointer to the first operand on the RHS of assignment
   statement GS.  */

static inline tree *
gimple_assign_rhs1_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 1);
}

/* Set RHS to be the first operand on the RHS of assignment statement
   GS.  */

static inline void
gimple_assign_set_rhs1 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 1, rhs);
}

/* Return the second operand on the RHS of assignment statement GS.
   If GS does not have two operands, NULL is returned instead.  */

static inline tree
gimple_assign_rhs2 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);

  if (gimple_num_ops (gs) >= 3)
    return gimple_op (gs, 2);
  else
    return NULL_TREE;
}

/* Return a pointer to the second operand on the RHS of assignment
   statement GS.  */

static inline tree *
gimple_assign_rhs2_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 2);
}

/* Set RHS to be the second operand on the RHS of assignment statement
   GS.
*/

static inline void
gimple_assign_set_rhs2 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 2, rhs);
}

/* Return the third operand on the RHS of assignment statement GS.
   If GS does not have a third operand, NULL is returned instead.  */

static inline tree
gimple_assign_rhs3 (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);

  if (gimple_num_ops (gs) >= 4)
    return gimple_op (gs, 3);
  else
    return NULL_TREE;
}

/* Return a pointer to the third operand on the RHS of assignment
   statement GS.  */

static inline tree *
gimple_assign_rhs3_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gimple_op_ptr (gs, 3);
}

/* Set RHS to be the third operand on the RHS of assignment statement
   GS.  */

static inline void
gimple_assign_set_rhs3 (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gimple_set_op (gs, 3, rhs);
}

/* A wrapper around gimple_assign_set_rhs_with_ops_1, for callers which
   expect to see only a maximum of two operands.  */

static inline void
gimple_assign_set_rhs_with_ops (gimple_stmt_iterator *gsi, enum tree_code code,
				tree op1, tree op2)
{
  gimple_assign_set_rhs_with_ops_1 (gsi, code, op1, op2, NULL);
}

/* Returns true if GS is a nontemporal move.  */

static inline bool
gimple_assign_nontemporal_move_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  return gs->nontemporal_move;
}

/* Sets nontemporal move flag of GS to NONTEMPORAL.  */

static inline void
gimple_assign_set_nontemporal_move (gimple gs, bool nontemporal)
{
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);
  gs->nontemporal_move = nontemporal;
}

/* Return the code of the expression computed on the rhs of assignment
   statement GS.  In case that the RHS is a single object, returns the
   tree code of the object.
*/

static inline enum tree_code
gimple_assign_rhs_code (const_gimple gs)
{
  enum tree_code code;
  GIMPLE_CHECK (gs, GIMPLE_ASSIGN);

  code = (enum tree_code) gs->subcode;
  /* While we initially set subcode to the TREE_CODE of the rhs for
     GIMPLE_SINGLE_RHS assigns we do not update that subcode to stay
     in sync when we rewrite stmts into SSA form or do SSA propagations.  */
  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    code = TREE_CODE (gimple_assign_rhs1 (gs));

  return code;
}

/* Set CODE to be the code for the expression computed on the RHS of
   assignment S.  */

static inline void
gimple_assign_set_rhs_code (gimple s, enum tree_code code)
{
  GIMPLE_CHECK (s, GIMPLE_ASSIGN);
  s->subcode = code;
}

/* Return the gimple rhs class of the code of the expression computed on
   the rhs of assignment statement GS.
   This will never return GIMPLE_INVALID_RHS.  */

static inline enum gimple_rhs_class
gimple_assign_rhs_class (const_gimple gs)
{
  return get_gimple_rhs_class (gimple_assign_rhs_code (gs));
}

/* Return true if GS is an assignment with a singleton RHS, i.e.,
   there is no operator associated with the assignment itself.
   Unlike gimple_assign_copy_p, this predicate returns true for
   any RHS operand, including those that perform an operation
   and do not have the semantics of a copy, such as COND_EXPR.  */

static inline bool
gimple_assign_single_p (gimple gs)
{
  return (is_gimple_assign (gs)
          && gimple_assign_rhs_class (gs) == GIMPLE_SINGLE_RHS);
}

/* Return true if GS performs a store to its lhs, i.e. its LHS is a
   non-register (memory) location.  */

static inline bool
gimple_store_p (gimple gs)
{
  tree lhs = gimple_get_lhs (gs);
  return lhs && !is_gimple_reg (lhs);
}

/* Return true if GS is an assignment that loads from its rhs1.
*/

static inline bool
gimple_assign_load_p (gimple gs)
{
  tree rhs;
  if (!gimple_assign_single_p (gs))
    return false;
  rhs = gimple_assign_rhs1 (gs);
  /* WITH_SIZE_EXPR wraps a memory reference and always counts as a
     load.  Otherwise the base address must be a memory object (decl,
     MEM_REF or TARGET_MEM_REF).  */
  if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
    return true;
  rhs = get_base_address (rhs);
  return (DECL_P (rhs)
	  || TREE_CODE (rhs) == MEM_REF || TREE_CODE (rhs) == TARGET_MEM_REF);
}

/* Return true if S is a type-cast assignment.  */

static inline bool
gimple_assign_cast_p (gimple s)
{
  if (is_gimple_assign (s))
    {
      enum tree_code sc = gimple_assign_rhs_code (s);
      return CONVERT_EXPR_CODE_P (sc)
	     || sc == VIEW_CONVERT_EXPR
	     || sc == FIX_TRUNC_EXPR;
    }

  return false;
}

/* Return true if S is a clobber statement.  */

static inline bool
gimple_clobber_p (gimple s)
{
  return gimple_assign_single_p (s)
         && TREE_CLOBBER_P (gimple_assign_rhs1 (s));
}

/* Return true if GS is a GIMPLE_CALL.  */

static inline bool
is_gimple_call (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_CALL;
}

/* Return the LHS of call statement GS.  */

static inline tree
gimple_call_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 0);
}

/* Return a pointer to the LHS of call statement GS.  */

static inline tree *
gimple_call_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 0);
}

/* Set LHS to be the LHS operand of call statement GS.  If LHS is an
   SSA name, its defining statement is updated to GS.  */

static inline void
gimple_call_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, 0, lhs);
  if (lhs && TREE_CODE (lhs) == SSA_NAME)
    SSA_NAME_DEF_STMT (lhs) = gs;
}

/* Return true if call GS calls an internal-only function, as enumerated
   by internal_fn.  */

static inline bool
gimple_call_internal_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return (gs->subcode & GF_CALL_INTERNAL) != 0;
}

/* Return the target of internal call GS.
*/

static inline enum internal_fn
gimple_call_internal_fn (const_gimple gs)
{
  gcc_gimple_checking_assert (gimple_call_internal_p (gs));
  return static_cast <const gimple_statement_call *> (gs)->u.internal_fn;
}

/* If CTRL_ALTERING_P is true, mark GIMPLE_CALL S to be a stmt
   that could alter control flow.  */

static inline void
gimple_call_set_ctrl_altering (gimple s, bool ctrl_altering_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (ctrl_altering_p)
    s->subcode |= GF_CALL_CTRL_ALTERING;
  else
    s->subcode &= ~GF_CALL_CTRL_ALTERING;
}

/* Return true if call GS calls an func whose GF_CALL_CTRL_ALTERING
   flag is set. Such call could not be a stmt in the middle of a bb.  */

static inline bool
gimple_call_ctrl_altering_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return (gs->subcode & GF_CALL_CTRL_ALTERING) != 0;
}

/* Return the function type of the function called by GS.  Internal
   calls carry an internal_fn instead of a fntype, so NULL_TREE is
   returned for them.  */

static inline tree
gimple_call_fntype (const_gimple gs)
{
  const gimple_statement_call *call_stmt =
    as_a <const gimple_statement_call> (gs);
  if (gimple_call_internal_p (gs))
    return NULL_TREE;
  return call_stmt->u.fntype;
}

/* Set the type of the function called by GS to FNTYPE.  */

static inline void
gimple_call_set_fntype (gimple gs, tree fntype)
{
  gimple_statement_call *call_stmt = as_a <gimple_statement_call> (gs);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  call_stmt->u.fntype = fntype;
}

/* Return the tree node representing the function called by call
   statement GS.  */

static inline tree
gimple_call_fn (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 1);
}

/* Return a pointer to the tree node representing the function called by
   call statement GS.  */

static inline tree *
gimple_call_fn_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 1);
}

/* Set FN to be the function called by call statement GS.
*/

static inline void
gimple_call_set_fn (gimple gs, tree fn)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gimple_set_op (gs, 1, fn);
}

/* Set FNDECL to be the function called by call statement GS.  The
   decl is wrapped in an ADDR_EXPR carrying GS's location.  */

static inline void
gimple_call_set_fndecl (gimple gs, tree decl)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gcc_gimple_checking_assert (!gimple_call_internal_p (gs));
  gimple_set_op (gs, 1, build_fold_addr_expr_loc (gimple_location (gs), decl));
}

/* Set internal function FN to be the function called by call statement
   GS.  */

static inline void
gimple_call_set_internal_fn (gimple gs, enum internal_fn fn)
{
  gimple_statement_call *call_stmt = as_a <gimple_statement_call> (gs);
  gcc_gimple_checking_assert (gimple_call_internal_p (gs));
  call_stmt->u.internal_fn = fn;
}

/* If a given GIMPLE_CALL's callee is a FUNCTION_DECL, return it.
   Otherwise return NULL.  This function is analogous to
   get_callee_fndecl in tree land.  */

static inline tree
gimple_call_fndecl (const_gimple gs)
{
  return gimple_call_addr_fndecl (gimple_call_fn (gs));
}

/* Return the type returned by call statement GS.  For internal calls
   (NULL fntype) the type of the LHS is used instead.  */

static inline tree
gimple_call_return_type (const_gimple gs)
{
  tree type = gimple_call_fntype (gs);

  if (type == NULL_TREE)
    return TREE_TYPE (gimple_call_lhs (gs));

  /* The type returned by a function is the type of its
     function type.  */
  return TREE_TYPE (type);
}

/* Return the static chain for call statement GS.  */

static inline tree
gimple_call_chain (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, 2);
}

/* Return a pointer to the static chain for call statement GS.  */

static inline tree *
gimple_call_chain_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, 2);
}

/* Set CHAIN to be the static chain for call statement GS.
*/

static inline void
gimple_call_set_chain (gimple gs, tree chain)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, 2, chain);
}

/* Return the number of arguments used by call statement GS.
   Operands 0..2 hold the LHS, the callee and the static chain, so
   the actual call arguments start at operand 3.  */

static inline unsigned
gimple_call_num_args (const_gimple gs)
{
  unsigned num_ops;
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  num_ops = gimple_num_ops (gs);
  return num_ops - 3;
}

/* Return the argument at position INDEX for call statement GS.  */

static inline tree
gimple_call_arg (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op (gs, index + 3);
}

/* Return a pointer to the argument at position INDEX for call
   statement GS.  */

static inline tree *
gimple_call_arg_ptr (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  return gimple_op_ptr (gs, index + 3);
}

/* Set ARG to be the argument at position INDEX for call statement GS.  */

static inline void
gimple_call_set_arg (gimple gs, unsigned index, tree arg)
{
  GIMPLE_CHECK (gs, GIMPLE_CALL);
  gimple_set_op (gs, index + 3, arg);
}

/* If TAIL_P is true, mark call statement S as being a tail call
   (i.e., a call just before the exit of a function).  These calls are
   candidate for tail call optimization.  */

static inline void
gimple_call_set_tail (gimple s, bool tail_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (tail_p)
    s->subcode |= GF_CALL_TAILCALL;
  else
    s->subcode &= ~GF_CALL_TAILCALL;
}

/* Return true if GIMPLE_CALL S is marked as a tail call.  */

static inline bool
gimple_call_tail_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->subcode & GF_CALL_TAILCALL) != 0;
}

/* If RETURN_SLOT_OPT_P is true mark GIMPLE_CALL S as valid for return
   slot optimization.  This transformation uses the target of the call
   expansion as the return slot for calls that return in memory.
*/

static inline void
gimple_call_set_return_slot_opt (gimple s, bool return_slot_opt_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (return_slot_opt_p)
    s->subcode |= GF_CALL_RETURN_SLOT_OPT;
  else
    s->subcode &= ~GF_CALL_RETURN_SLOT_OPT;
}

/* Return true if S is marked for return slot optimization.  */

static inline bool
gimple_call_return_slot_opt_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->subcode & GF_CALL_RETURN_SLOT_OPT) != 0;
}

/* If FROM_THUNK_P is true, mark GIMPLE_CALL S as being the jump from a
   thunk to the thunked-to function.  */

static inline void
gimple_call_set_from_thunk (gimple s, bool from_thunk_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (from_thunk_p)
    s->subcode |= GF_CALL_FROM_THUNK;
  else
    s->subcode &= ~GF_CALL_FROM_THUNK;
}

/* Return true if GIMPLE_CALL S is a jump from a thunk.  */

static inline bool
gimple_call_from_thunk_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->subcode & GF_CALL_FROM_THUNK) != 0;
}

/* If PASS_ARG_PACK_P is true, GIMPLE_CALL S is a stdarg call that needs
   the argument pack in its argument list.  */

static inline void
gimple_call_set_va_arg_pack (gimple s, bool pass_arg_pack_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (pass_arg_pack_p)
    s->subcode |= GF_CALL_VA_ARG_PACK;
  else
    s->subcode &= ~GF_CALL_VA_ARG_PACK;
}

/* Return true if GIMPLE_CALL S is a stdarg call that needs the
   argument pack in its argument list.  */

static inline bool
gimple_call_va_arg_pack_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->subcode & GF_CALL_VA_ARG_PACK) != 0;
}

/* Return true if S is a noreturn call (ECF_NORETURN in the call
   flags).  */

static inline bool
gimple_call_noreturn_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (gimple_call_flags (s) & ECF_NORETURN) != 0;
}

/* If NOTHROW_P is true, GIMPLE_CALL S is a call that is known to not throw
   even if the called function can throw in other cases.
*/

static inline void
gimple_call_set_nothrow (gimple s, bool nothrow_p)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (nothrow_p)
    s->subcode |= GF_CALL_NOTHROW;
  else
    s->subcode &= ~GF_CALL_NOTHROW;
}

/* Return true if S is a nothrow call.  */

static inline bool
gimple_call_nothrow_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (gimple_call_flags (s) & ECF_NOTHROW) != 0;
}

/* If FOR_VAR is true, GIMPLE_CALL S is a call to builtin_alloca that
   is known to be emitted for VLA objects.  Those are wrapped by
   stack_save/stack_restore calls and hence can't lead to unbounded
   stack growth even when they occur in loops.  */

static inline void
gimple_call_set_alloca_for_var (gimple s, bool for_var)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  if (for_var)
    s->subcode |= GF_CALL_ALLOCA_FOR_VAR;
  else
    s->subcode &= ~GF_CALL_ALLOCA_FOR_VAR;
}

/* Return true of S is a call to builtin_alloca emitted for VLA objects.  */

static inline bool
gimple_call_alloca_for_var_p (gimple s)
{
  GIMPLE_CHECK (s, GIMPLE_CALL);
  return (s->subcode & GF_CALL_ALLOCA_FOR_VAR) != 0;
}

/* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL.  The whole
   subcode is overwritten.  */

static inline void
gimple_call_copy_flags (gimple dest_call, gimple orig_call)
{
  GIMPLE_CHECK (dest_call, GIMPLE_CALL);
  GIMPLE_CHECK (orig_call, GIMPLE_CALL);
  dest_call->subcode = orig_call->subcode;
}

/* Return a pointer to the points-to solution for the set of call-used
   variables of the call CALL.  */

static inline struct pt_solution *
gimple_call_use_set (gimple call)
{
  gimple_statement_call *call_stmt = as_a <gimple_statement_call> (call);
  return &call_stmt->call_used;
}

/* Return a pointer to the points-to solution for the set of
   call-clobbered variables of the call CALL.  */

static inline struct pt_solution *
gimple_call_clobber_set (gimple call)
{
  gimple_statement_call *call_stmt = as_a <gimple_statement_call> (call);
  return &call_stmt->call_clobbered;
}

/* Returns true if this is a GIMPLE_ASSIGN or a GIMPLE_CALL with a
   non-NULL lhs.
*/

static inline bool
gimple_has_lhs (gimple stmt)
{
  return (is_gimple_assign (stmt)
	  || (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt) != NULL_TREE));
}

/* Return the code of the predicate computed by conditional statement
   GS.  The comparison code is stored in the statement subcode.  */

static inline enum tree_code
gimple_cond_code (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return (enum tree_code) gs->subcode;
}

/* Set CODE to be the predicate code for the conditional statement GS.  */

static inline void
gimple_cond_set_code (gimple gs, enum tree_code code)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gs->subcode = code;
}

/* Return the LHS of the predicate computed by conditional statement GS.  */

static inline tree
gimple_cond_lhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 0);
}

/* Return the pointer to the LHS of the predicate computed by conditional
   statement GS.  */

static inline tree *
gimple_cond_lhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op_ptr (gs, 0);
}

/* Set LHS to be the LHS operand of the predicate computed by
   conditional statement GS.  */

static inline void
gimple_cond_set_lhs (gimple gs, tree lhs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 0, lhs);
}

/* Return the RHS operand of the predicate computed by conditional GS.  */

static inline tree
gimple_cond_rhs (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 1);
}

/* Return the pointer to the RHS operand of the predicate computed by
   conditional GS.  */

static inline tree *
gimple_cond_rhs_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op_ptr (gs, 1);
}

/* Set RHS to be the RHS operand of the predicate computed by
   conditional statement GS.  */

static inline void
gimple_cond_set_rhs (gimple gs, tree rhs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 1, rhs);
}

/* Return the label used by conditional statement GS when its
   predicate evaluates to true.
*/

static inline tree
gimple_cond_true_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 2);
}

/* Set LABEL to be the label used by conditional statement GS when its
   predicate evaluates to true.  */

static inline void
gimple_cond_set_true_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 2, label);
}

/* Set LABEL to be the label used by conditional statement GS when its
   predicate evaluates to false.  */

static inline void
gimple_cond_set_false_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  gimple_set_op (gs, 3, label);
}

/* Return the label used by conditional statement GS when its
   predicate evaluates to false.  */

static inline tree
gimple_cond_false_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_COND);
  return gimple_op (gs, 3);
}

/* Set conditional statement GS to an always-false predicate, of the
   form 'if (1 == 0)'.  */

static inline void
gimple_cond_make_false (gimple gs)
{
  gimple_cond_set_lhs (gs, boolean_true_node);
  gimple_cond_set_rhs (gs, boolean_false_node);
  gs->subcode = EQ_EXPR;
}

/* Set the conditional COND_STMT to be of the form 'if (1 == 1)'.
*/

static inline void
gimple_cond_make_true (gimple gs)
{
  gimple_cond_set_lhs (gs, boolean_true_node);
  gimple_cond_set_rhs (gs, boolean_true_node);
  gs->subcode = EQ_EXPR;
}

/* Check if conditional statement GS is of the form 'if (1 == 1)',
  'if (0 == 0)', 'if (1 != 0)' or 'if (0 != 1)' */

static inline bool
gimple_cond_true_p (const_gimple gs)
{
  tree lhs = gimple_cond_lhs (gs);
  tree rhs = gimple_cond_rhs (gs);
  enum tree_code code = gimple_cond_code (gs);

  /* Both operands must be boolean constants for the predicate to be
     statically decidable.  */
  if (lhs != boolean_true_node && lhs != boolean_false_node)
    return false;

  if (rhs != boolean_true_node && rhs != boolean_false_node)
    return false;

  if (code == NE_EXPR && lhs != rhs)
    return true;

  if (code == EQ_EXPR && lhs == rhs)
      return true;

  return false;
}

/* Check if conditional statement GS is of the form 'if (1 != 1)',
   'if (0 != 0)', 'if (1 == 0)' or 'if (0 == 1)' */

static inline bool
gimple_cond_false_p (const_gimple gs)
{
  tree lhs = gimple_cond_lhs (gs);
  tree rhs = gimple_cond_rhs (gs);
  enum tree_code code = gimple_cond_code (gs);

  if (lhs != boolean_true_node && lhs != boolean_false_node)
    return false;

  if (rhs != boolean_true_node && rhs != boolean_false_node)
    return false;

  if (code == NE_EXPR && lhs == rhs)
    return true;

  if (code == EQ_EXPR && lhs != rhs)
    return true;

  return false;
}

/* Set the code, LHS and RHS of GIMPLE_COND STMT from CODE, LHS and RHS.  */

static inline void
gimple_cond_set_condition (gimple stmt, enum tree_code code, tree lhs,
			   tree rhs)
{
  gimple_cond_set_code (stmt, code);
  gimple_cond_set_lhs (stmt, lhs);
  gimple_cond_set_rhs (stmt, rhs);
}

/* Return the LABEL_DECL node used by GIMPLE_LABEL statement GS.  */

static inline tree
gimple_label_label (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_LABEL);
  return gimple_op (gs, 0);
}

/* Set LABEL to be the LABEL_DECL node used by GIMPLE_LABEL statement
   GS.  */

static inline void
gimple_label_set_label (gimple gs, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_LABEL);
  gimple_set_op (gs, 0, label);
}

/* Return the destination of the unconditional jump GS.
*/

static inline tree
gimple_goto_dest (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_GOTO);
  return gimple_op (gs, 0);
}


/* Set DEST to be the destination of the unconditional jump GS.  */

static inline void
gimple_goto_set_dest (gimple gs, tree dest)
{
  GIMPLE_CHECK (gs, GIMPLE_GOTO);
  gimple_set_op (gs, 0, dest);
}


/* Return the variables declared in the GIMPLE_BIND statement GS.  */

static inline tree
gimple_bind_vars (const_gimple gs)
{
  const gimple_statement_bind *bind_stmt = as_a <const gimple_statement_bind> (gs);
  return bind_stmt->vars;
}


/* Set VARS to be the set of variables declared in the GIMPLE_BIND
   statement GS.  */

static inline void
gimple_bind_set_vars (gimple gs, tree vars)
{
  gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
  bind_stmt->vars = vars;
}


/* Append VARS to the set of variables declared in the GIMPLE_BIND
   statement GS.  */

static inline void
gimple_bind_append_vars (gimple gs, tree vars)
{
  gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
  /* chainon concatenates VARS onto the tail of the existing chain.  */
  bind_stmt->vars = chainon (bind_stmt->vars, vars);
}


/* Return a pointer to the GIMPLE sequence contained in the GIMPLE_BIND
   statement GS.  */

static inline gimple_seq *
gimple_bind_body_ptr (gimple gs)
{
  gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
  return &bind_stmt->body;
}

/* Return the GIMPLE sequence contained in the GIMPLE_BIND statement GS.  */

static inline gimple_seq
gimple_bind_body (gimple gs)
{
  return *gimple_bind_body_ptr (gs);
}


/* Set SEQ to be the GIMPLE sequence contained in the GIMPLE_BIND
   statement GS.  */

static inline void
gimple_bind_set_body (gimple gs, gimple_seq seq)
{
  gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
  bind_stmt->body = seq;
}


/* Append a statement to the end of a GIMPLE_BIND's body.  */

static inline void
gimple_bind_add_stmt (gimple gs, gimple stmt)
{
  gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
  gimple_seq_add_stmt (&bind_stmt->body, stmt);
}


/* Append a sequence of statements to the end of a GIMPLE_BIND's body.
*/

static inline void
gimple_bind_add_seq (gimple gs, gimple_seq seq)
{
  gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
  gimple_seq_add_seq (&bind_stmt->body, seq);
}


/* Return the TREE_BLOCK node associated with GIMPLE_BIND statement
   GS.  This is analogous to the BIND_EXPR_BLOCK field in trees.  */

static inline tree
gimple_bind_block (const_gimple gs)
{
  const gimple_statement_bind *bind_stmt = as_a <const gimple_statement_bind> (gs);
  return bind_stmt->block;
}


/* Set BLOCK to be the TREE_BLOCK node associated with GIMPLE_BIND
   statement GS.  BLOCK may be NULL_TREE.  */

static inline void
gimple_bind_set_block (gimple gs, tree block)
{
  gimple_statement_bind *bind_stmt = as_a <gimple_statement_bind> (gs);
  gcc_gimple_checking_assert (block == NULL_TREE
			      || TREE_CODE (block) == BLOCK);
  bind_stmt->block = block;
}


/* Return the number of input operands for GIMPLE_ASM GS.  */

static inline unsigned
gimple_asm_ninputs (const_gimple gs)
{
  const gimple_statement_asm *asm_stmt = as_a <const gimple_statement_asm> (gs);
  return asm_stmt->ni;
}


/* Return the number of output operands for GIMPLE_ASM GS.  */

static inline unsigned
gimple_asm_noutputs (const_gimple gs)
{
  const gimple_statement_asm *asm_stmt = as_a <const gimple_statement_asm> (gs);
  return asm_stmt->no;
}


/* Return the number of clobber operands for GIMPLE_ASM GS.  */

static inline unsigned
gimple_asm_nclobbers (const_gimple gs)
{
  const gimple_statement_asm *asm_stmt = as_a <const gimple_statement_asm> (gs);
  return asm_stmt->nc;
}

/* Return the number of label operands for GIMPLE_ASM GS.  */

static inline unsigned
gimple_asm_nlabels (const_gimple gs)
{
  const gimple_statement_asm *asm_stmt = as_a <const gimple_statement_asm> (gs);
  return asm_stmt->nl;
}

/* Return input operand INDEX of GIMPLE_ASM GS.
*/

static inline tree
gimple_asm_input_op (const_gimple gs, unsigned index)
{
  const gimple_statement_asm *asm_stmt = as_a <const gimple_statement_asm> (gs);
  gcc_gimple_checking_assert (index < asm_stmt->ni);
  /* Operand layout: outputs first, then inputs; hence the NO offset.  */
  return gimple_op (gs, index + asm_stmt->no);
}

/* Return a pointer to input operand INDEX of GIMPLE_ASM GS.  */

static inline tree *
gimple_asm_input_op_ptr (const_gimple gs, unsigned index)
{
  const gimple_statement_asm *asm_stmt = as_a <const gimple_statement_asm> (gs);
  gcc_gimple_checking_assert (index < asm_stmt->ni);
  return gimple_op_ptr (gs, index + asm_stmt->no);
}


/* Set IN_OP to be input operand INDEX in GIMPLE_ASM GS.  IN_OP must be
   a TREE_LIST (constraint, operand) pair.  */

static inline void
gimple_asm_set_input_op (gimple gs, unsigned index, tree in_op)
{
  gimple_statement_asm *asm_stmt = as_a <gimple_statement_asm> (gs);
  gcc_gimple_checking_assert (index < asm_stmt->ni
			      && TREE_CODE (in_op) == TREE_LIST);
  gimple_set_op (gs, index + asm_stmt->no, in_op);
}


/* Return output operand INDEX of GIMPLE_ASM GS.  Outputs occupy the
   first NO operand slots.  */

static inline tree
gimple_asm_output_op (const_gimple gs, unsigned index)
{
  const gimple_statement_asm *asm_stmt = as_a <const gimple_statement_asm> (gs);
  gcc_gimple_checking_assert (index < asm_stmt->no);
  return gimple_op (gs, index);
}

/* Return a pointer to output operand INDEX of GIMPLE_ASM GS.  */

static inline tree *
gimple_asm_output_op_ptr (const_gimple gs, unsigned index)
{
  const gimple_statement_asm *asm_stmt = as_a <const gimple_statement_asm> (gs);
  gcc_gimple_checking_assert (index < asm_stmt->no);
  return gimple_op_ptr (gs, index);
}


/* Set OUT_OP to be output operand INDEX in GIMPLE_ASM GS.  OUT_OP must
   be a TREE_LIST (constraint, operand) pair.  */

static inline void
gimple_asm_set_output_op (gimple gs, unsigned index, tree out_op)
{
  gimple_statement_asm *asm_stmt = as_a <gimple_statement_asm> (gs);
  gcc_gimple_checking_assert (index < asm_stmt->no
			      && TREE_CODE (out_op) == TREE_LIST);
  gimple_set_op (gs, index, out_op);
}


/* Return clobber operand INDEX of GIMPLE_ASM GS.
*/ static inline tree gimple_asm_clobber_op (const_gimple gs, unsigned index) { const gimple_statement_asm *asm_stmt = as_a <const gimple_statement_asm> (gs); gcc_gimple_checking_assert (index < asm_stmt->nc); return gimple_op (gs, index + asm_stmt->ni + asm_stmt->no); } /* Set CLOBBER_OP to be clobber operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_clobber_op (gimple gs, unsigned index, tree clobber_op) { gimple_statement_asm *asm_stmt = as_a <gimple_statement_asm> (gs); gcc_gimple_checking_assert (index < asm_stmt->nc && TREE_CODE (clobber_op) == TREE_LIST); gimple_set_op (gs, index + asm_stmt->ni + asm_stmt->no, clobber_op); } /* Return label operand INDEX of GIMPLE_ASM GS. */ static inline tree gimple_asm_label_op (const_gimple gs, unsigned index) { const gimple_statement_asm *asm_stmt = as_a <const gimple_statement_asm> (gs); gcc_gimple_checking_assert (index < asm_stmt->nl); return gimple_op (gs, index + asm_stmt->ni + asm_stmt->nc); } /* Set LABEL_OP to be label operand INDEX in GIMPLE_ASM GS. */ static inline void gimple_asm_set_label_op (gimple gs, unsigned index, tree label_op) { gimple_statement_asm *asm_stmt = as_a <gimple_statement_asm> (gs); gcc_gimple_checking_assert (index < asm_stmt->nl && TREE_CODE (label_op) == TREE_LIST); gimple_set_op (gs, index + asm_stmt->ni + asm_stmt->nc, label_op); } /* Return the string representing the assembly instruction in GIMPLE_ASM GS. */ static inline const char * gimple_asm_string (const_gimple gs) { const gimple_statement_asm *asm_stmt = as_a <const gimple_statement_asm> (gs); return asm_stmt->string; } /* Return true if GS is an asm statement marked volatile. */ static inline bool gimple_asm_volatile_p (const_gimple gs) { GIMPLE_CHECK (gs, GIMPLE_ASM); return (gs->subcode & GF_ASM_VOLATILE) != 0; } /* If VOLATLE_P is true, mark asm statement GS as volatile. 
*/

static inline void
gimple_asm_set_volatile (gimple gs, bool volatile_p)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  if (volatile_p)
    gs->subcode |= GF_ASM_VOLATILE;
  else
    gs->subcode &= ~GF_ASM_VOLATILE;
}


/* If INPUT_P is true, mark asm GS as an ASM_INPUT.  */

static inline void
gimple_asm_set_input (gimple gs, bool input_p)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  if (input_p)
    gs->subcode |= GF_ASM_INPUT;
  else
    gs->subcode &= ~GF_ASM_INPUT;
}


/* Return true if asm GS is an ASM_INPUT.  */

static inline bool
gimple_asm_input_p (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_ASM);
  return (gs->subcode & GF_ASM_INPUT) != 0;
}


/* Return the types handled by GIMPLE_CATCH statement GS.  */

static inline tree
gimple_catch_types (const_gimple gs)
{
  const gimple_statement_catch *catch_stmt = as_a <const gimple_statement_catch> (gs);
  return catch_stmt->types;
}


/* Return a pointer to the types handled by GIMPLE_CATCH statement GS.  */

static inline tree *
gimple_catch_types_ptr (gimple gs)
{
  gimple_statement_catch *catch_stmt = as_a <gimple_statement_catch> (gs);
  return &catch_stmt->types;
}


/* Return a pointer to the GIMPLE sequence representing the body of
   the handler of GIMPLE_CATCH statement GS.  */

static inline gimple_seq *
gimple_catch_handler_ptr (gimple gs)
{
  gimple_statement_catch *catch_stmt = as_a <gimple_statement_catch> (gs);
  return &catch_stmt->handler;
}


/* Return the GIMPLE sequence representing the body of the handler of
   GIMPLE_CATCH statement GS.  */

static inline gimple_seq
gimple_catch_handler (gimple gs)
{
  return *gimple_catch_handler_ptr (gs);
}


/* Set T to be the set of types handled by GIMPLE_CATCH GS.  */

static inline void
gimple_catch_set_types (gimple gs, tree t)
{
  gimple_statement_catch *catch_stmt = as_a <gimple_statement_catch> (gs);
  catch_stmt->types = t;
}


/* Set HANDLER to be the body of GIMPLE_CATCH GS.
*/

static inline void
gimple_catch_set_handler (gimple gs, gimple_seq handler)
{
  gimple_statement_catch *catch_stmt = as_a <gimple_statement_catch> (gs);
  catch_stmt->handler = handler;
}


/* Return the types handled by GIMPLE_EH_FILTER statement GS.  */

static inline tree
gimple_eh_filter_types (const_gimple gs)
{
  const gimple_statement_eh_filter *eh_filter_stmt =
    as_a <const gimple_statement_eh_filter> (gs);
  return eh_filter_stmt->types;
}


/* Return a pointer to the types handled by GIMPLE_EH_FILTER statement
   GS.  */

static inline tree *
gimple_eh_filter_types_ptr (gimple gs)
{
  gimple_statement_eh_filter *eh_filter_stmt =
    as_a <gimple_statement_eh_filter> (gs);
  return &eh_filter_stmt->types;
}


/* Return a pointer to the sequence of statement to execute when
   GIMPLE_EH_FILTER statement fails.  */

static inline gimple_seq *
gimple_eh_filter_failure_ptr (gimple gs)
{
  gimple_statement_eh_filter *eh_filter_stmt =
    as_a <gimple_statement_eh_filter> (gs);
  return &eh_filter_stmt->failure;
}


/* Return the sequence of statement to execute when GIMPLE_EH_FILTER
   statement fails.  */

static inline gimple_seq
gimple_eh_filter_failure (gimple gs)
{
  return *gimple_eh_filter_failure_ptr (gs);
}


/* Set TYPES to be the set of types handled by GIMPLE_EH_FILTER GS.  */

static inline void
gimple_eh_filter_set_types (gimple gs, tree types)
{
  gimple_statement_eh_filter *eh_filter_stmt =
    as_a <gimple_statement_eh_filter> (gs);
  eh_filter_stmt->types = types;
}


/* Set FAILURE to be the sequence of statements to execute on failure
   for GIMPLE_EH_FILTER GS.  */

static inline void
gimple_eh_filter_set_failure (gimple gs, gimple_seq failure)
{
  gimple_statement_eh_filter *eh_filter_stmt =
    as_a <gimple_statement_eh_filter> (gs);
  eh_filter_stmt->failure = failure;
}

/* Get the function decl to be called by the MUST_NOT_THROW region.
*/

static inline tree
gimple_eh_must_not_throw_fndecl (gimple gs)
{
  gimple_statement_eh_mnt *eh_mnt_stmt = as_a <gimple_statement_eh_mnt> (gs);
  return eh_mnt_stmt->fndecl;
}

/* Set the function decl to be called by GS to DECL.  */

static inline void
gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl)
{
  gimple_statement_eh_mnt *eh_mnt_stmt = as_a <gimple_statement_eh_mnt> (gs);
  eh_mnt_stmt->fndecl = decl;
}

/* GIMPLE_EH_ELSE accessors.  */

/* Return a pointer to the sequence executed in the normal (non-EH) path.  */

static inline gimple_seq *
gimple_eh_else_n_body_ptr (gimple gs)
{
  gimple_statement_eh_else *eh_else_stmt = as_a <gimple_statement_eh_else> (gs);
  return &eh_else_stmt->n_body;
}

/* Return the sequence executed in the normal (non-EH) path.  */

static inline gimple_seq
gimple_eh_else_n_body (gimple gs)
{
  return *gimple_eh_else_n_body_ptr (gs);
}

/* Return a pointer to the sequence executed in the exceptional path.  */

static inline gimple_seq *
gimple_eh_else_e_body_ptr (gimple gs)
{
  gimple_statement_eh_else *eh_else_stmt = as_a <gimple_statement_eh_else> (gs);
  return &eh_else_stmt->e_body;
}

/* Return the sequence executed in the exceptional path.  */

static inline gimple_seq
gimple_eh_else_e_body (gimple gs)
{
  return *gimple_eh_else_e_body_ptr (gs);
}

/* Set SEQ as the sequence executed in the normal (non-EH) path.  */

static inline void
gimple_eh_else_set_n_body (gimple gs, gimple_seq seq)
{
  gimple_statement_eh_else *eh_else_stmt = as_a <gimple_statement_eh_else> (gs);
  eh_else_stmt->n_body = seq;
}

/* Set SEQ as the sequence executed in the exceptional path.  */

static inline void
gimple_eh_else_set_e_body (gimple gs, gimple_seq seq)
{
  gimple_statement_eh_else *eh_else_stmt = as_a <gimple_statement_eh_else> (gs);
  eh_else_stmt->e_body = seq;
}

/* GIMPLE_TRY accessors.  */

/* Return the kind of try block represented by GIMPLE_TRY GS.  This is
   either GIMPLE_TRY_CATCH or GIMPLE_TRY_FINALLY.  */

static inline enum gimple_try_flags
gimple_try_kind (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  return (enum gimple_try_flags) (gs->subcode & GIMPLE_TRY_KIND);
}


/* Set the kind of try block represented by GIMPLE_TRY GS.
*/

static inline void
gimple_try_set_kind (gimple gs, enum gimple_try_flags kind)
{
  GIMPLE_CHECK (gs, GIMPLE_TRY);
  gcc_gimple_checking_assert (kind == GIMPLE_TRY_CATCH
			      || kind == GIMPLE_TRY_FINALLY);
  if (gimple_try_kind (gs) != kind)
    gs->subcode = (unsigned int) kind;
}


/* Return the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  */

static inline bool
gimple_try_catch_is_cleanup (const_gimple gs)
{
  gcc_gimple_checking_assert (gimple_try_kind (gs) == GIMPLE_TRY_CATCH);
  return (gs->subcode & GIMPLE_TRY_CATCH_IS_CLEANUP) != 0;
}


/* Return a pointer to the sequence of statements used as the
   body for GIMPLE_TRY GS.  */

static inline gimple_seq *
gimple_try_eval_ptr (gimple gs)
{
  gimple_statement_try *try_stmt = as_a <gimple_statement_try> (gs);
  return &try_stmt->eval;
}


/* Return the sequence of statements used as the body for GIMPLE_TRY GS.  */

static inline gimple_seq
gimple_try_eval (gimple gs)
{
  return *gimple_try_eval_ptr (gs);
}


/* Return a pointer to the sequence of statements used as the cleanup body for
   GIMPLE_TRY GS.  */

static inline gimple_seq *
gimple_try_cleanup_ptr (gimple gs)
{
  gimple_statement_try *try_stmt = as_a <gimple_statement_try> (gs);
  return &try_stmt->cleanup;
}


/* Return the sequence of statements used as the cleanup body for
   GIMPLE_TRY GS.  */

static inline gimple_seq
gimple_try_cleanup (gimple gs)
{
  return *gimple_try_cleanup_ptr (gs);
}


/* Set the GIMPLE_TRY_CATCH_IS_CLEANUP flag.  */

static inline void
gimple_try_set_catch_is_cleanup (gimple g, bool catch_is_cleanup)
{
  gcc_gimple_checking_assert (gimple_try_kind (g) == GIMPLE_TRY_CATCH);
  if (catch_is_cleanup)
    g->subcode |= GIMPLE_TRY_CATCH_IS_CLEANUP;
  else
    g->subcode &= ~GIMPLE_TRY_CATCH_IS_CLEANUP;
}


/* Set EVAL to be the sequence of statements to use as the body for
   GIMPLE_TRY GS.
*/

static inline void
gimple_try_set_eval (gimple gs, gimple_seq eval)
{
  gimple_statement_try *try_stmt = as_a <gimple_statement_try> (gs);
  try_stmt->eval = eval;
}


/* Set CLEANUP to be the sequence of statements to use as the cleanup
   body for GIMPLE_TRY GS.  */

static inline void
gimple_try_set_cleanup (gimple gs, gimple_seq cleanup)
{
  gimple_statement_try *try_stmt = as_a <gimple_statement_try> (gs);
  try_stmt->cleanup = cleanup;
}


/* Return a pointer to the cleanup sequence for cleanup statement GS.  */

static inline gimple_seq *
gimple_wce_cleanup_ptr (gimple gs)
{
  gimple_statement_wce *wce_stmt = as_a <gimple_statement_wce> (gs);
  return &wce_stmt->cleanup;
}


/* Return the cleanup sequence for cleanup statement GS.  */

static inline gimple_seq
gimple_wce_cleanup (gimple gs)
{
  return *gimple_wce_cleanup_ptr (gs);
}


/* Set CLEANUP to be the cleanup sequence for GS.  */

static inline void
gimple_wce_set_cleanup (gimple gs, gimple_seq cleanup)
{
  gimple_statement_wce *wce_stmt = as_a <gimple_statement_wce> (gs);
  wce_stmt->cleanup = cleanup;
}


/* Return the CLEANUP_EH_ONLY flag for a WCE tuple.  */

static inline bool
gimple_wce_cleanup_eh_only (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
  /* The flag is stored directly in the subcode field.  */
  return gs->subcode != 0;
}


/* Set the CLEANUP_EH_ONLY flag for a WCE tuple.  */

static inline void
gimple_wce_set_cleanup_eh_only (gimple gs, bool eh_only_p)
{
  GIMPLE_CHECK (gs, GIMPLE_WITH_CLEANUP_EXPR);
  gs->subcode = (unsigned int) eh_only_p;
}


/* Return the maximum number of arguments supported by GIMPLE_PHI GS.  */

static inline unsigned
gimple_phi_capacity (const_gimple gs)
{
  const gimple_statement_phi *phi_stmt = as_a <const gimple_statement_phi> (gs);
  return phi_stmt->capacity;
}


/* Return the number of arguments in GIMPLE_PHI GS.  This must always
   be exactly the number of incoming edges for the basic block holding
   GS.
*/ static inline unsigned gimple_phi_num_args (const_gimple gs) { const gimple_statement_phi *phi_stmt = as_a <const gimple_statement_phi> (gs); return phi_stmt->nargs; } /* Return the SSA name created by GIMPLE_PHI GS. */ static inline tree gimple_phi_result (const_gimple gs) { const gimple_statement_phi *phi_stmt = as_a <const gimple_statement_phi> (gs); return phi_stmt->result; } /* Return a pointer to the SSA name created by GIMPLE_PHI GS. */ static inline tree * gimple_phi_result_ptr (gimple gs) { gimple_statement_phi *phi_stmt = as_a <gimple_statement_phi> (gs); return &phi_stmt->result; } /* Set RESULT to be the SSA name created by GIMPLE_PHI GS. */ static inline void gimple_phi_set_result (gimple gs, tree result) { gimple_statement_phi *phi_stmt = as_a <gimple_statement_phi> (gs); phi_stmt->result = result; if (result && TREE_CODE (result) == SSA_NAME) SSA_NAME_DEF_STMT (result) = gs; } /* Return the PHI argument corresponding to incoming edge INDEX for GIMPLE_PHI GS. */ static inline struct phi_arg_d * gimple_phi_arg (gimple gs, unsigned index) { gimple_statement_phi *phi_stmt = as_a <gimple_statement_phi> (gs); gcc_gimple_checking_assert (index <= phi_stmt->capacity); return &(phi_stmt->args[index]); } /* Set PHIARG to be the argument corresponding to incoming edge INDEX for GIMPLE_PHI GS. */ static inline void gimple_phi_set_arg (gimple gs, unsigned index, struct phi_arg_d * phiarg) { gimple_statement_phi *phi_stmt = as_a <gimple_statement_phi> (gs); gcc_gimple_checking_assert (index <= phi_stmt->nargs); phi_stmt->args[index] = *phiarg; } /* Return the PHI nodes for basic block BB, or NULL if there are no PHI nodes. */ static inline gimple_seq phi_nodes (const_basic_block bb) { gcc_checking_assert (!(bb->flags & BB_RTL)); return bb->il.gimple.phi_nodes; } /* Return a pointer to the PHI nodes for basic block BB. 
*/

static inline gimple_seq *
phi_nodes_ptr (basic_block bb)
{
  gcc_checking_assert (!(bb->flags & BB_RTL));
  return &bb->il.gimple.phi_nodes;
}

/* Return the tree operand for argument I of PHI node GS.  */

static inline tree
gimple_phi_arg_def (gimple gs, size_t index)
{
  return gimple_phi_arg (gs, index)->def;
}


/* Return a pointer to the tree operand for argument I of PHI node GS.  */

static inline tree *
gimple_phi_arg_def_ptr (gimple gs, size_t index)
{
  return &gimple_phi_arg (gs, index)->def;
}

/* Return the edge associated with argument I of phi node GS.  */

static inline edge
gimple_phi_arg_edge (gimple gs, size_t i)
{
  /* PHI argument I corresponds to the I-th predecessor edge of the
     block holding GS.  */
  return EDGE_PRED (gimple_bb (gs), i);
}

/* Return the source location of gimple argument I of phi node GS.  */

static inline source_location
gimple_phi_arg_location (gimple gs, size_t i)
{
  return gimple_phi_arg (gs, i)->locus;
}

/* Return the source location of the argument on edge E of phi node GS.  */

static inline source_location
gimple_phi_arg_location_from_edge (gimple gs, edge e)
{
  return gimple_phi_arg (gs, e->dest_idx)->locus;
}

/* Set the source location of gimple argument I of phi node GS to LOC.  */

static inline void
gimple_phi_arg_set_location (gimple gs, size_t i, source_location loc)
{
  gimple_phi_arg (gs, i)->locus = loc;
}

/* Return TRUE if argument I of phi node GS has a location record.  */

static inline bool
gimple_phi_arg_has_location (gimple gs, size_t i)
{
  return gimple_phi_arg_location (gs, i) != UNKNOWN_LOCATION;
}


/* Return the region number for GIMPLE_RESX GS.  */

static inline int
gimple_resx_region (const_gimple gs)
{
  const gimple_statement_resx *resx_stmt = as_a <const gimple_statement_resx> (gs);
  return resx_stmt->region;
}

/* Set REGION to be the region number for GIMPLE_RESX GS.  */

static inline void
gimple_resx_set_region (gimple gs, int region)
{
  gimple_statement_resx *resx_stmt = as_a <gimple_statement_resx> (gs);
  resx_stmt->region = region;
}

/* Return the region number for GIMPLE_EH_DISPATCH GS.
*/

static inline int
gimple_eh_dispatch_region (const_gimple gs)
{
  const gimple_statement_eh_dispatch *eh_dispatch_stmt =
    as_a <const gimple_statement_eh_dispatch> (gs);
  return eh_dispatch_stmt->region;
}

/* Set REGION to be the region number for GIMPLE_EH_DISPATCH GS.  */

static inline void
gimple_eh_dispatch_set_region (gimple gs, int region)
{
  gimple_statement_eh_dispatch *eh_dispatch_stmt =
    as_a <gimple_statement_eh_dispatch> (gs);
  eh_dispatch_stmt->region = region;
}

/* Return the number of labels associated with the switch statement GS.  */

static inline unsigned
gimple_switch_num_labels (const_gimple gs)
{
  unsigned num_ops;
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  num_ops = gimple_num_ops (gs);
  /* Operand 0 is the index expression; the rest are labels.  */
  gcc_gimple_checking_assert (num_ops > 1);
  return num_ops - 1;
}


/* Set NLABELS to be the number of labels for the switch statement GS.  */

static inline void
gimple_switch_set_num_labels (gimple g, unsigned nlabels)
{
  GIMPLE_CHECK (g, GIMPLE_SWITCH);
  gimple_set_num_ops (g, nlabels + 1);
}


/* Return the index variable used by the switch statement GS.  */

static inline tree
gimple_switch_index (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  return gimple_op (gs, 0);
}


/* Return a pointer to the index variable for the switch statement GS.  */

static inline tree *
gimple_switch_index_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  return gimple_op_ptr (gs, 0);
}


/* Set INDEX to be the index variable for switch statement GS.  */

static inline void
gimple_switch_set_index (gimple gs, tree index)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  gcc_gimple_checking_assert (SSA_VAR_P (index) || CONSTANT_CLASS_P (index));
  gimple_set_op (gs, 0, index);
}


/* Return the label numbered INDEX.  The default label is 0, followed
   by any labels in a switch statement.
*/

static inline tree
gimple_switch_label (const_gimple gs, unsigned index)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  /* Labels start at operand 1; operand 0 holds the index expression.  */
  gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1);
  return gimple_op (gs, index + 1);
}

/* Set the label number INDEX to LABEL.  0 is always the default label.  */

static inline void
gimple_switch_set_label (gimple gs, unsigned index, tree label)
{
  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  gcc_gimple_checking_assert (gimple_num_ops (gs) > index + 1
			      && (label == NULL_TREE
			          || TREE_CODE (label) == CASE_LABEL_EXPR));
  gimple_set_op (gs, index + 1, label);
}

/* Return the default label for a switch statement.  */

static inline tree
gimple_switch_default_label (const_gimple gs)
{
  tree label = gimple_switch_label (gs, 0);
  /* The default label carries neither a low nor a high bound.  */
  gcc_checking_assert (!CASE_LOW (label) && !CASE_HIGH (label));
  return label;
}

/* Set the default label for a switch statement.  */

static inline void
gimple_switch_set_default_label (gimple gs, tree label)
{
  gcc_checking_assert (!CASE_LOW (label) && !CASE_HIGH (label));
  gimple_switch_set_label (gs, 0, label);
}

/* Return true if GS is a GIMPLE_DEBUG statement.  */

static inline bool
is_gimple_debug (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_DEBUG;
}

/* Return true if S is a GIMPLE_DEBUG BIND statement.  */

static inline bool
gimple_debug_bind_p (const_gimple s)
{
  if (is_gimple_debug (s))
    return s->subcode == GIMPLE_DEBUG_BIND;

  return false;
}

/* Return the variable bound in a GIMPLE_DEBUG bind statement.  */

static inline tree
gimple_debug_bind_get_var (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op (dbg, 0);
}

/* Return the value bound to the variable in a GIMPLE_DEBUG bind
   statement.  */

static inline tree
gimple_debug_bind_get_value (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op (dbg, 1);
}

/* Return a pointer to the value bound to the variable in a
   GIMPLE_DEBUG bind statement.
*/

static inline tree *
gimple_debug_bind_get_value_ptr (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op_ptr (dbg, 1);
}

/* Set the variable bound in a GIMPLE_DEBUG bind statement.  */

static inline void
gimple_debug_bind_set_var (gimple dbg, tree var)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 0, var);
}

/* Set the value bound to the variable in a GIMPLE_DEBUG bind
   statement.  */

static inline void
gimple_debug_bind_set_value (gimple dbg, tree value)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 1, value);
}

/* The second operand of a GIMPLE_DEBUG_BIND, when the value was
   optimized away.  */
#define GIMPLE_DEBUG_BIND_NOVALUE NULL_TREE /* error_mark_node */

/* Remove the value bound to the variable in a GIMPLE_DEBUG bind
   statement.  */

static inline void
gimple_debug_bind_reset_value (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  gimple_set_op (dbg, 1, GIMPLE_DEBUG_BIND_NOVALUE);
}

/* Return true if the GIMPLE_DEBUG bind statement is bound to a
   value.  */

static inline bool
gimple_debug_bind_has_value_p (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_bind_p (dbg));
  return gimple_op (dbg, 1) != GIMPLE_DEBUG_BIND_NOVALUE;
}

#undef GIMPLE_DEBUG_BIND_NOVALUE

/* Return true if S is a GIMPLE_DEBUG SOURCE BIND statement.  */

static inline bool
gimple_debug_source_bind_p (const_gimple s)
{
  if (is_gimple_debug (s))
    return s->subcode == GIMPLE_DEBUG_SOURCE_BIND;

  return false;
}

/* Return the variable bound in a GIMPLE_DEBUG source bind statement.
*/

static inline tree
gimple_debug_source_bind_get_var (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  return gimple_op (dbg, 0);
}

/* Return the value bound to the variable in a GIMPLE_DEBUG source bind
   statement.  */

static inline tree
gimple_debug_source_bind_get_value (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  return gimple_op (dbg, 1);
}

/* Return a pointer to the value bound to the variable in a
   GIMPLE_DEBUG source bind statement.  */

static inline tree *
gimple_debug_source_bind_get_value_ptr (gimple dbg)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  return gimple_op_ptr (dbg, 1);
}

/* Set the variable bound in a GIMPLE_DEBUG source bind statement.  */

static inline void
gimple_debug_source_bind_set_var (gimple dbg, tree var)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  gimple_set_op (dbg, 0, var);
}

/* Set the value bound to the variable in a GIMPLE_DEBUG source bind
   statement.  */

static inline void
gimple_debug_source_bind_set_value (gimple dbg, tree value)
{
  GIMPLE_CHECK (dbg, GIMPLE_DEBUG);
  gcc_gimple_checking_assert (gimple_debug_source_bind_p (dbg));
  gimple_set_op (dbg, 1, value);
}

/* Return the line number for EXPR, or return -1 if we have no line
   number information for it.  */

static inline int
get_lineno (const_gimple stmt)
{
  location_t loc;

  if (!stmt)
    return -1;

  loc = gimple_location (stmt);
  if (loc == UNKNOWN_LOCATION)
    return -1;

  return LOCATION_LINE (loc);
}

/* Return a pointer to the body for the OMP statement GS.  */

static inline gimple_seq *
gimple_omp_body_ptr (gimple gs)
{
  /* All OMP tuples derive from gimple_statement_omp, which carries the
     body sequence; hence the static_cast rather than as_a.  */
  return &static_cast <gimple_statement_omp *> (gs)->body;
}

/* Return the body for the OMP statement GS.
*/

static inline gimple_seq
gimple_omp_body (gimple gs)
{
  return *gimple_omp_body_ptr (gs);
}

/* Set BODY to be the body for the OMP statement GS.  */

static inline void
gimple_omp_set_body (gimple gs, gimple_seq body)
{
  static_cast <gimple_statement_omp *> (gs)->body = body;
}


/* Return the name associated with OMP_CRITICAL statement GS.  */

static inline tree
gimple_omp_critical_name (const_gimple gs)
{
  const gimple_statement_omp_critical *omp_critical_stmt =
    as_a <const gimple_statement_omp_critical> (gs);
  return omp_critical_stmt->name;
}


/* Return a pointer to the name associated with OMP critical statement GS.  */

static inline tree *
gimple_omp_critical_name_ptr (gimple gs)
{
  gimple_statement_omp_critical *omp_critical_stmt =
    as_a <gimple_statement_omp_critical> (gs);
  return &omp_critical_stmt->name;
}


/* Set NAME to be the name associated with OMP critical statement GS.  */

static inline void
gimple_omp_critical_set_name (gimple gs, tree name)
{
  gimple_statement_omp_critical *omp_critical_stmt =
    as_a <gimple_statement_omp_critical> (gs);
  omp_critical_stmt->name = name;
}


/* Return the kind of OMP for statement.  */

static inline int
gimple_omp_for_kind (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
  return (gimple_omp_subcode (g) & GF_OMP_FOR_KIND_MASK);
}


/* Set the OMP for kind.  */

static inline void
gimple_omp_for_set_kind (gimple g, int kind)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
  g->subcode = (g->subcode & ~GF_OMP_FOR_KIND_MASK)
	       | (kind & GF_OMP_FOR_KIND_MASK);
}


/* Return true if OMP for statement G has the
   GF_OMP_FOR_COMBINED flag set.  */

static inline bool
gimple_omp_for_combined_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
  return (gimple_omp_subcode (g) & GF_OMP_FOR_COMBINED) != 0;
}


/* Set the GF_OMP_FOR_COMBINED field in G depending on the boolean
   value of COMBINED_P.
*/

static inline void
gimple_omp_for_set_combined_p (gimple g, bool combined_p)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
  if (combined_p)
    g->subcode |= GF_OMP_FOR_COMBINED;
  else
    g->subcode &= ~GF_OMP_FOR_COMBINED;
}


/* Return true if OMP for statement G has the
   GF_OMP_FOR_COMBINED_INTO flag set.  */

static inline bool
gimple_omp_for_combined_into_p (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
  return (gimple_omp_subcode (g) & GF_OMP_FOR_COMBINED_INTO) != 0;
}


/* Set the GF_OMP_FOR_COMBINED_INTO field in G depending on the boolean
   value of COMBINED_P.  */

static inline void
gimple_omp_for_set_combined_into_p (gimple g, bool combined_p)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_FOR);
  if (combined_p)
    g->subcode |= GF_OMP_FOR_COMBINED_INTO;
  else
    g->subcode &= ~GF_OMP_FOR_COMBINED_INTO;
}


/* Return the clauses associated with OMP_FOR GS.  */

static inline tree
gimple_omp_for_clauses (const_gimple gs)
{
  const gimple_statement_omp_for *omp_for_stmt =
    as_a <const gimple_statement_omp_for> (gs);
  return omp_for_stmt->clauses;
}


/* Return a pointer to the clauses associated with OMP_FOR GS.  */

static inline tree *
gimple_omp_for_clauses_ptr (gimple gs)
{
  gimple_statement_omp_for *omp_for_stmt = as_a <gimple_statement_omp_for> (gs);
  return &omp_for_stmt->clauses;
}


/* Set CLAUSES to be the list of clauses associated with OMP_FOR GS.  */

static inline void
gimple_omp_for_set_clauses (gimple gs, tree clauses)
{
  gimple_statement_omp_for *omp_for_stmt = as_a <gimple_statement_omp_for> (gs);
  omp_for_stmt->clauses = clauses;
}


/* Get the collapse count of OMP_FOR GS.  */

static inline size_t
gimple_omp_for_collapse (gimple gs)
{
  gimple_statement_omp_for *omp_for_stmt = as_a <gimple_statement_omp_for> (gs);
  return omp_for_stmt->collapse;
}


/* Return the index variable for OMP_FOR GS.
*/

static inline tree
gimple_omp_for_index (const_gimple gs, size_t i)
{
  const gimple_statement_omp_for *omp_for_stmt =
    as_a <const gimple_statement_omp_for> (gs);
  /* I selects one of the collapsed loop dimensions.  */
  gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
  return omp_for_stmt->iter[i].index;
}


/* Return a pointer to the index variable for OMP_FOR GS.  */

static inline tree *
gimple_omp_for_index_ptr (gimple gs, size_t i)
{
  gimple_statement_omp_for *omp_for_stmt = as_a <gimple_statement_omp_for> (gs);
  gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
  return &omp_for_stmt->iter[i].index;
}


/* Set INDEX to be the index variable for OMP_FOR GS.  */

static inline void
gimple_omp_for_set_index (gimple gs, size_t i, tree index)
{
  gimple_statement_omp_for *omp_for_stmt = as_a <gimple_statement_omp_for> (gs);
  gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
  omp_for_stmt->iter[i].index = index;
}


/* Return the initial value for OMP_FOR GS.  */

static inline tree
gimple_omp_for_initial (const_gimple gs, size_t i)
{
  const gimple_statement_omp_for *omp_for_stmt =
    as_a <const gimple_statement_omp_for> (gs);
  gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
  return omp_for_stmt->iter[i].initial;
}


/* Return a pointer to the initial value for OMP_FOR GS.  */

static inline tree *
gimple_omp_for_initial_ptr (gimple gs, size_t i)
{
  gimple_statement_omp_for *omp_for_stmt = as_a <gimple_statement_omp_for> (gs);
  gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
  return &omp_for_stmt->iter[i].initial;
}


/* Set INITIAL to be the initial value for OMP_FOR GS.  */

static inline void
gimple_omp_for_set_initial (gimple gs, size_t i, tree initial)
{
  gimple_statement_omp_for *omp_for_stmt = as_a <gimple_statement_omp_for> (gs);
  gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
  omp_for_stmt->iter[i].initial = initial;
}


/* Return the final value for OMP_FOR GS.
*/

/* Return the final (bound) value for dimension I of OMP_FOR GS.  */

static inline tree
gimple_omp_for_final (const_gimple gs, size_t i)
{
  const gimple_statement_omp_for *omp_for_stmt =
    as_a <const gimple_statement_omp_for> (gs);
  gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
  return omp_for_stmt->iter[i].final;
}

/* Return a pointer to the final value for OMP_FOR GS.  */

static inline tree *
gimple_omp_for_final_ptr (gimple gs, size_t i)
{
  gimple_statement_omp_for *omp_for_stmt =
    as_a <gimple_statement_omp_for> (gs);
  gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
  return &omp_for_stmt->iter[i].final;
}

/* Set FINAL to be the final value for OMP_FOR GS.  */

static inline void
gimple_omp_for_set_final (gimple gs, size_t i, tree final)
{
  gimple_statement_omp_for *omp_for_stmt =
    as_a <gimple_statement_omp_for> (gs);
  gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
  omp_for_stmt->iter[i].final = final;
}

/* Return the increment value for OMP_FOR GS.  */

static inline tree
gimple_omp_for_incr (const_gimple gs, size_t i)
{
  const gimple_statement_omp_for *omp_for_stmt =
    as_a <const gimple_statement_omp_for> (gs);
  gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
  return omp_for_stmt->iter[i].incr;
}

/* Return a pointer to the increment value for OMP_FOR GS.  */

static inline tree *
gimple_omp_for_incr_ptr (gimple gs, size_t i)
{
  gimple_statement_omp_for *omp_for_stmt =
    as_a <gimple_statement_omp_for> (gs);
  gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
  return &omp_for_stmt->iter[i].incr;
}

/* Set INCR to be the increment value for OMP_FOR GS.  */

static inline void
gimple_omp_for_set_incr (gimple gs, size_t i, tree incr)
{
  gimple_statement_omp_for *omp_for_stmt =
    as_a <gimple_statement_omp_for> (gs);
  gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
  omp_for_stmt->iter[i].incr = incr;
}

/* Return a pointer to the sequence of statements to execute before the
   OMP_FOR statement GS starts.
*/

static inline gimple_seq *
gimple_omp_for_pre_body_ptr (gimple gs)
{
  gimple_statement_omp_for *omp_for_stmt =
    as_a <gimple_statement_omp_for> (gs);
  return &omp_for_stmt->pre_body;
}

/* Return the sequence of statements to execute before the OMP_FOR
   statement GS starts.  */

static inline gimple_seq
gimple_omp_for_pre_body (gimple gs)
{
  return *gimple_omp_for_pre_body_ptr (gs);
}

/* Set PRE_BODY to be the sequence of statements to execute before the
   OMP_FOR statement GS starts.  */

static inline void
gimple_omp_for_set_pre_body (gimple gs, gimple_seq pre_body)
{
  gimple_statement_omp_for *omp_for_stmt =
    as_a <gimple_statement_omp_for> (gs);
  omp_for_stmt->pre_body = pre_body;
}

/* Return the clauses associated with OMP_PARALLEL GS.  */

static inline tree
gimple_omp_parallel_clauses (const_gimple gs)
{
  const gimple_statement_omp_parallel *omp_parallel_stmt =
    as_a <const gimple_statement_omp_parallel> (gs);
  return omp_parallel_stmt->clauses;
}

/* Return a pointer to the clauses associated with OMP_PARALLEL GS.  */

static inline tree *
gimple_omp_parallel_clauses_ptr (gimple gs)
{
  gimple_statement_omp_parallel *omp_parallel_stmt =
    as_a <gimple_statement_omp_parallel> (gs);
  return &omp_parallel_stmt->clauses;
}

/* Set CLAUSES to be the list of clauses associated with OMP_PARALLEL GS.  */

static inline void
gimple_omp_parallel_set_clauses (gimple gs, tree clauses)
{
  gimple_statement_omp_parallel *omp_parallel_stmt =
    as_a <gimple_statement_omp_parallel> (gs);
  omp_parallel_stmt->clauses = clauses;
}

/* Return the child function used to hold the body of OMP_PARALLEL GS.  */

static inline tree
gimple_omp_parallel_child_fn (const_gimple gs)
{
  const gimple_statement_omp_parallel *omp_parallel_stmt =
    as_a <const gimple_statement_omp_parallel> (gs);
  return omp_parallel_stmt->child_fn;
}

/* Return a pointer to the child function used to hold the body of
   OMP_PARALLEL GS.
*/

static inline tree *
gimple_omp_parallel_child_fn_ptr (gimple gs)
{
  gimple_statement_omp_parallel *omp_parallel_stmt =
    as_a <gimple_statement_omp_parallel> (gs);
  return &omp_parallel_stmt->child_fn;
}

/* Set CHILD_FN to be the child function for OMP_PARALLEL GS.  */

static inline void
gimple_omp_parallel_set_child_fn (gimple gs, tree child_fn)
{
  gimple_statement_omp_parallel *omp_parallel_stmt =
    as_a <gimple_statement_omp_parallel> (gs);
  omp_parallel_stmt->child_fn = child_fn;
}

/* Return the artificial argument used to send variables and values
   from the parent to the children threads in OMP_PARALLEL GS.  */

static inline tree
gimple_omp_parallel_data_arg (const_gimple gs)
{
  const gimple_statement_omp_parallel *omp_parallel_stmt =
    as_a <const gimple_statement_omp_parallel> (gs);
  return omp_parallel_stmt->data_arg;
}

/* Return a pointer to the data argument for OMP_PARALLEL GS.  */

static inline tree *
gimple_omp_parallel_data_arg_ptr (gimple gs)
{
  gimple_statement_omp_parallel *omp_parallel_stmt =
    as_a <gimple_statement_omp_parallel> (gs);
  return &omp_parallel_stmt->data_arg;
}

/* Set DATA_ARG to be the data argument for OMP_PARALLEL GS.  */

static inline void
gimple_omp_parallel_set_data_arg (gimple gs, tree data_arg)
{
  gimple_statement_omp_parallel *omp_parallel_stmt =
    as_a <gimple_statement_omp_parallel> (gs);
  omp_parallel_stmt->data_arg = data_arg;
}

/* Return the clauses associated with OMP_TASK GS.  */

static inline tree
gimple_omp_task_clauses (const_gimple gs)
{
  const gimple_statement_omp_task *omp_task_stmt =
    as_a <const gimple_statement_omp_task> (gs);
  return omp_task_stmt->clauses;
}

/* Return a pointer to the clauses associated with OMP_TASK GS.  */

static inline tree *
gimple_omp_task_clauses_ptr (gimple gs)
{
  gimple_statement_omp_task *omp_task_stmt =
    as_a <gimple_statement_omp_task> (gs);
  return &omp_task_stmt->clauses;
}

/* Set CLAUSES to be the list of clauses associated with OMP_TASK GS.
*/

static inline void
gimple_omp_task_set_clauses (gimple gs, tree clauses)
{
  gimple_statement_omp_task *omp_task_stmt =
    as_a <gimple_statement_omp_task> (gs);
  omp_task_stmt->clauses = clauses;
}

/* Return the child function used to hold the body of OMP_TASK GS.  */

static inline tree
gimple_omp_task_child_fn (const_gimple gs)
{
  const gimple_statement_omp_task *omp_task_stmt =
    as_a <const gimple_statement_omp_task> (gs);
  return omp_task_stmt->child_fn;
}

/* Return a pointer to the child function used to hold the body of
   OMP_TASK GS.  */

static inline tree *
gimple_omp_task_child_fn_ptr (gimple gs)
{
  gimple_statement_omp_task *omp_task_stmt =
    as_a <gimple_statement_omp_task> (gs);
  return &omp_task_stmt->child_fn;
}

/* Set CHILD_FN to be the child function for OMP_TASK GS.  */

static inline void
gimple_omp_task_set_child_fn (gimple gs, tree child_fn)
{
  gimple_statement_omp_task *omp_task_stmt =
    as_a <gimple_statement_omp_task> (gs);
  omp_task_stmt->child_fn = child_fn;
}

/* Return the artificial argument used to send variables and values
   from the parent to the children threads in OMP_TASK GS.  */

static inline tree
gimple_omp_task_data_arg (const_gimple gs)
{
  const gimple_statement_omp_task *omp_task_stmt =
    as_a <const gimple_statement_omp_task> (gs);
  return omp_task_stmt->data_arg;
}

/* Return a pointer to the data argument for OMP_TASK GS.  */

static inline tree *
gimple_omp_task_data_arg_ptr (gimple gs)
{
  gimple_statement_omp_task *omp_task_stmt =
    as_a <gimple_statement_omp_task> (gs);
  return &omp_task_stmt->data_arg;
}

/* Set DATA_ARG to be the data argument for OMP_TASK GS.  */

static inline void
gimple_omp_task_set_data_arg (gimple gs, tree data_arg)
{
  gimple_statement_omp_task *omp_task_stmt =
    as_a <gimple_statement_omp_task> (gs);
  omp_task_stmt->data_arg = data_arg;
}

/* Return the clauses associated with OMP_TASK GS.
*/

/* NOTE(review): the "taskreg" accessors operate on the common
   gimple_statement_omp_taskreg base, so they presumably apply to both
   OMP_PARALLEL and OMP_TASK statements — the OMP_TASK-only wording in
   the surrounding comments looks like a copy-paste; confirm against the
   struct hierarchy.  */

static inline tree
gimple_omp_taskreg_clauses (const_gimple gs)
{
  const gimple_statement_omp_taskreg *omp_taskreg_stmt =
    as_a <const gimple_statement_omp_taskreg> (gs);
  return omp_taskreg_stmt->clauses;
}

/* Return a pointer to the clauses associated with OMP_TASK GS.  */

static inline tree *
gimple_omp_taskreg_clauses_ptr (gimple gs)
{
  gimple_statement_omp_taskreg *omp_taskreg_stmt =
    as_a <gimple_statement_omp_taskreg> (gs);
  return &omp_taskreg_stmt->clauses;
}

/* Set CLAUSES to be the list of clauses associated with OMP_TASK GS.  */

static inline void
gimple_omp_taskreg_set_clauses (gimple gs, tree clauses)
{
  gimple_statement_omp_taskreg *omp_taskreg_stmt =
    as_a <gimple_statement_omp_taskreg> (gs);
  omp_taskreg_stmt->clauses = clauses;
}

/* Return the child function used to hold the body of OMP_TASK GS.  */

static inline tree
gimple_omp_taskreg_child_fn (const_gimple gs)
{
  const gimple_statement_omp_taskreg *omp_taskreg_stmt =
    as_a <const gimple_statement_omp_taskreg> (gs);
  return omp_taskreg_stmt->child_fn;
}

/* Return a pointer to the child function used to hold the body of
   OMP_TASK GS.  */

static inline tree *
gimple_omp_taskreg_child_fn_ptr (gimple gs)
{
  gimple_statement_omp_taskreg *omp_taskreg_stmt =
    as_a <gimple_statement_omp_taskreg> (gs);
  return &omp_taskreg_stmt->child_fn;
}

/* Set CHILD_FN to be the child function for OMP_TASK GS.  */

static inline void
gimple_omp_taskreg_set_child_fn (gimple gs, tree child_fn)
{
  gimple_statement_omp_taskreg *omp_taskreg_stmt =
    as_a <gimple_statement_omp_taskreg> (gs);
  omp_taskreg_stmt->child_fn = child_fn;
}

/* Return the artificial argument used to send variables and values
   from the parent to the children threads in OMP_TASK GS.  */

static inline tree
gimple_omp_taskreg_data_arg (const_gimple gs)
{
  const gimple_statement_omp_taskreg *omp_taskreg_stmt =
    as_a <const gimple_statement_omp_taskreg> (gs);
  return omp_taskreg_stmt->data_arg;
}

/* Return a pointer to the data argument for OMP_TASK GS.
*/

static inline tree *
gimple_omp_taskreg_data_arg_ptr (gimple gs)
{
  gimple_statement_omp_taskreg *omp_taskreg_stmt =
    as_a <gimple_statement_omp_taskreg> (gs);
  return &omp_taskreg_stmt->data_arg;
}

/* Set DATA_ARG to be the data argument for OMP_TASK GS.  */

static inline void
gimple_omp_taskreg_set_data_arg (gimple gs, tree data_arg)
{
  gimple_statement_omp_taskreg *omp_taskreg_stmt =
    as_a <gimple_statement_omp_taskreg> (gs);
  omp_taskreg_stmt->data_arg = data_arg;
}

/* Return the copy function used to hold the body of OMP_TASK GS.  */

static inline tree
gimple_omp_task_copy_fn (const_gimple gs)
{
  const gimple_statement_omp_task *omp_task_stmt =
    as_a <const gimple_statement_omp_task> (gs);
  return omp_task_stmt->copy_fn;
}

/* Return a pointer to the copy function used to hold the body of
   OMP_TASK GS.  */

static inline tree *
gimple_omp_task_copy_fn_ptr (gimple gs)
{
  gimple_statement_omp_task *omp_task_stmt =
    as_a <gimple_statement_omp_task> (gs);
  return &omp_task_stmt->copy_fn;
}

/* Set COPY_FN to be the copy function for OMP_TASK GS.  */

static inline void
gimple_omp_task_set_copy_fn (gimple gs, tree copy_fn)
{
  gimple_statement_omp_task *omp_task_stmt =
    as_a <gimple_statement_omp_task> (gs);
  omp_task_stmt->copy_fn = copy_fn;
}

/* Return size of the data block in bytes in OMP_TASK GS.  */

static inline tree
gimple_omp_task_arg_size (const_gimple gs)
{
  const gimple_statement_omp_task *omp_task_stmt =
    as_a <const gimple_statement_omp_task> (gs);
  return omp_task_stmt->arg_size;
}

/* Return a pointer to the data block size for OMP_TASK GS.  */

static inline tree *
gimple_omp_task_arg_size_ptr (gimple gs)
{
  gimple_statement_omp_task *omp_task_stmt =
    as_a <gimple_statement_omp_task> (gs);
  return &omp_task_stmt->arg_size;
}

/* Set ARG_SIZE to be the data block size for OMP_TASK GS.
*/

static inline void
gimple_omp_task_set_arg_size (gimple gs, tree arg_size)
{
  gimple_statement_omp_task *omp_task_stmt =
    as_a <gimple_statement_omp_task> (gs);
  omp_task_stmt->arg_size = arg_size;
}

/* Return align of the data block in bytes in OMP_TASK GS.  */

static inline tree
gimple_omp_task_arg_align (const_gimple gs)
{
  const gimple_statement_omp_task *omp_task_stmt =
    as_a <const gimple_statement_omp_task> (gs);
  return omp_task_stmt->arg_align;
}

/* Return a pointer to the data block align for OMP_TASK GS.  */

static inline tree *
gimple_omp_task_arg_align_ptr (gimple gs)
{
  gimple_statement_omp_task *omp_task_stmt =
    as_a <gimple_statement_omp_task> (gs);
  return &omp_task_stmt->arg_align;
}

/* Set ARG_ALIGN to be the data block align for OMP_TASK GS.  */

static inline void
gimple_omp_task_set_arg_align (gimple gs, tree arg_align)
{
  gimple_statement_omp_task *omp_task_stmt =
    as_a <gimple_statement_omp_task> (gs);
  omp_task_stmt->arg_align = arg_align;
}

/* Return the clauses associated with OMP_SINGLE GS.  */

static inline tree
gimple_omp_single_clauses (const_gimple gs)
{
  const gimple_statement_omp_single *omp_single_stmt =
    as_a <const gimple_statement_omp_single> (gs);
  return omp_single_stmt->clauses;
}

/* Return a pointer to the clauses associated with OMP_SINGLE GS.  */

static inline tree *
gimple_omp_single_clauses_ptr (gimple gs)
{
  gimple_statement_omp_single *omp_single_stmt =
    as_a <gimple_statement_omp_single> (gs);
  return &omp_single_stmt->clauses;
}

/* Set CLAUSES to be the clauses associated with OMP_SINGLE GS.  */

static inline void
gimple_omp_single_set_clauses (gimple gs, tree clauses)
{
  gimple_statement_omp_single *omp_single_stmt =
    as_a <gimple_statement_omp_single> (gs);
  omp_single_stmt->clauses = clauses;
}

/* Return the clauses associated with OMP_TARGET GS.
*/

static inline tree
gimple_omp_target_clauses (const_gimple gs)
{
  const gimple_statement_omp_target *omp_target_stmt =
    as_a <const gimple_statement_omp_target> (gs);
  return omp_target_stmt->clauses;
}

/* Return a pointer to the clauses associated with OMP_TARGET GS.  */

static inline tree *
gimple_omp_target_clauses_ptr (gimple gs)
{
  gimple_statement_omp_target *omp_target_stmt =
    as_a <gimple_statement_omp_target> (gs);
  return &omp_target_stmt->clauses;
}

/* Set CLAUSES to be the clauses associated with OMP_TARGET GS.  */

static inline void
gimple_omp_target_set_clauses (gimple gs, tree clauses)
{
  gimple_statement_omp_target *omp_target_stmt =
    as_a <gimple_statement_omp_target> (gs);
  omp_target_stmt->clauses = clauses;
}

/* Return the kind of OMP target statemement.  */

static inline int
gimple_omp_target_kind (const_gimple g)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_TARGET);
  return (gimple_omp_subcode (g) & GF_OMP_TARGET_KIND_MASK);
}

/* Set the OMP target kind.  Only the bits covered by
   GF_OMP_TARGET_KIND_MASK are stored; other subcode bits are
   preserved.  */

static inline void
gimple_omp_target_set_kind (gimple g, int kind)
{
  GIMPLE_CHECK (g, GIMPLE_OMP_TARGET);
  g->subcode = (g->subcode & ~GF_OMP_TARGET_KIND_MASK)
	       | (kind & GF_OMP_TARGET_KIND_MASK);
}

/* Return the child function used to hold the body of OMP_TARGET GS.  */

static inline tree
gimple_omp_target_child_fn (const_gimple gs)
{
  const gimple_statement_omp_target *omp_target_stmt =
    as_a <const gimple_statement_omp_target> (gs);
  return omp_target_stmt->child_fn;
}

/* Return a pointer to the child function used to hold the body of
   OMP_TARGET GS.  */

static inline tree *
gimple_omp_target_child_fn_ptr (gimple gs)
{
  gimple_statement_omp_target *omp_target_stmt =
    as_a <gimple_statement_omp_target> (gs);
  return &omp_target_stmt->child_fn;
}

/* Set CHILD_FN to be the child function for OMP_TARGET GS.
*/

static inline void
gimple_omp_target_set_child_fn (gimple gs, tree child_fn)
{
  gimple_statement_omp_target *omp_target_stmt =
    as_a <gimple_statement_omp_target> (gs);
  omp_target_stmt->child_fn = child_fn;
}

/* Return the artificial argument used to send variables and values
   from the parent to the children threads in OMP_TARGET GS.  */

static inline tree
gimple_omp_target_data_arg (const_gimple gs)
{
  const gimple_statement_omp_target *omp_target_stmt =
    as_a <const gimple_statement_omp_target> (gs);
  return omp_target_stmt->data_arg;
}

/* Return a pointer to the data argument for OMP_TARGET GS.  */

static inline tree *
gimple_omp_target_data_arg_ptr (gimple gs)
{
  gimple_statement_omp_target *omp_target_stmt =
    as_a <gimple_statement_omp_target> (gs);
  return &omp_target_stmt->data_arg;
}

/* Set DATA_ARG to be the data argument for OMP_TARGET GS.  */

static inline void
gimple_omp_target_set_data_arg (gimple gs, tree data_arg)
{
  gimple_statement_omp_target *omp_target_stmt =
    as_a <gimple_statement_omp_target> (gs);
  omp_target_stmt->data_arg = data_arg;
}

/* Return the clauses associated with OMP_TEAMS GS.  */

static inline tree
gimple_omp_teams_clauses (const_gimple gs)
{
  const gimple_statement_omp_teams *omp_teams_stmt =
    as_a <const gimple_statement_omp_teams> (gs);
  return omp_teams_stmt->clauses;
}

/* Return a pointer to the clauses associated with OMP_TEAMS GS.  */

static inline tree *
gimple_omp_teams_clauses_ptr (gimple gs)
{
  gimple_statement_omp_teams *omp_teams_stmt =
    as_a <gimple_statement_omp_teams> (gs);
  return &omp_teams_stmt->clauses;
}

/* Set CLAUSES to be the clauses associated with OMP_TEAMS GS.  */

static inline void
gimple_omp_teams_set_clauses (gimple gs, tree clauses)
{
  gimple_statement_omp_teams *omp_teams_stmt =
    as_a <gimple_statement_omp_teams> (gs);
  omp_teams_stmt->clauses = clauses;
}

/* Return the clauses associated with OMP_SECTIONS GS.
*/

static inline tree
gimple_omp_sections_clauses (const_gimple gs)
{
  const gimple_statement_omp_sections *omp_sections_stmt =
    as_a <const gimple_statement_omp_sections> (gs);
  return omp_sections_stmt->clauses;
}

/* Return a pointer to the clauses associated with OMP_SECTIONS GS.  */

static inline tree *
gimple_omp_sections_clauses_ptr (gimple gs)
{
  gimple_statement_omp_sections *omp_sections_stmt =
    as_a <gimple_statement_omp_sections> (gs);
  return &omp_sections_stmt->clauses;
}

/* Set CLAUSES to be the set of clauses associated with OMP_SECTIONS GS.  */

static inline void
gimple_omp_sections_set_clauses (gimple gs, tree clauses)
{
  gimple_statement_omp_sections *omp_sections_stmt =
    as_a <gimple_statement_omp_sections> (gs);
  omp_sections_stmt->clauses = clauses;
}

/* Return the control variable associated with the GIMPLE_OMP_SECTIONS
   in GS.  */

static inline tree
gimple_omp_sections_control (const_gimple gs)
{
  const gimple_statement_omp_sections *omp_sections_stmt =
    as_a <const gimple_statement_omp_sections> (gs);
  return omp_sections_stmt->control;
}

/* Return a pointer to the control variable associated with the
   GIMPLE_OMP_SECTIONS GS.  */

static inline tree *
gimple_omp_sections_control_ptr (gimple gs)
{
  gimple_statement_omp_sections *omp_sections_stmt =
    as_a <gimple_statement_omp_sections> (gs);
  return &omp_sections_stmt->control;
}

/* Set CONTROL to be the control variable associated with the
   GIMPLE_OMP_SECTIONS in GS.  */

static inline void
gimple_omp_sections_set_control (gimple gs, tree control)
{
  gimple_statement_omp_sections *omp_sections_stmt =
    as_a <gimple_statement_omp_sections> (gs);
  omp_sections_stmt->control = control;
}

/* Set COND to be the condition code for OMP_FOR GS.
*/

static inline void
gimple_omp_for_set_cond (gimple gs, size_t i, enum tree_code cond)
{
  gimple_statement_omp_for *omp_for_stmt =
    as_a <gimple_statement_omp_for> (gs);
  /* Only comparison codes are meaningful as loop conditions.  */
  gcc_gimple_checking_assert (TREE_CODE_CLASS (cond) == tcc_comparison
			      && i < omp_for_stmt->collapse);
  omp_for_stmt->iter[i].cond = cond;
}

/* Return the condition code associated with OMP_FOR GS.  */

static inline enum tree_code
gimple_omp_for_cond (const_gimple gs, size_t i)
{
  const gimple_statement_omp_for *omp_for_stmt =
    as_a <const gimple_statement_omp_for> (gs);
  gcc_gimple_checking_assert (i < omp_for_stmt->collapse);
  return omp_for_stmt->iter[i].cond;
}

/* Set the value being stored in an atomic store.  */

static inline void
gimple_omp_atomic_store_set_val (gimple g, tree val)
{
  gimple_statement_omp_atomic_store *omp_atomic_store_stmt =
    as_a <gimple_statement_omp_atomic_store> (g);
  omp_atomic_store_stmt->val = val;
}

/* Return the value being stored in an atomic store.  */

static inline tree
gimple_omp_atomic_store_val (const_gimple g)
{
  const gimple_statement_omp_atomic_store *omp_atomic_store_stmt =
    as_a <const gimple_statement_omp_atomic_store> (g);
  return omp_atomic_store_stmt->val;
}

/* Return a pointer to the value being stored in an atomic store.  */

static inline tree *
gimple_omp_atomic_store_val_ptr (gimple g)
{
  gimple_statement_omp_atomic_store *omp_atomic_store_stmt =
    as_a <gimple_statement_omp_atomic_store> (g);
  return &omp_atomic_store_stmt->val;
}

/* Set the LHS of an atomic load.  */

static inline void
gimple_omp_atomic_load_set_lhs (gimple g, tree lhs)
{
  gimple_statement_omp_atomic_load *omp_atomic_load_stmt =
    as_a <gimple_statement_omp_atomic_load> (g);
  omp_atomic_load_stmt->lhs = lhs;
}

/* Get the LHS of an atomic load.
*/

static inline tree
gimple_omp_atomic_load_lhs (const_gimple g)
{
  const gimple_statement_omp_atomic_load *omp_atomic_load_stmt =
    as_a <const gimple_statement_omp_atomic_load> (g);
  return omp_atomic_load_stmt->lhs;
}

/* Return a pointer to the LHS of an atomic load.  */

static inline tree *
gimple_omp_atomic_load_lhs_ptr (gimple g)
{
  gimple_statement_omp_atomic_load *omp_atomic_load_stmt =
    as_a <gimple_statement_omp_atomic_load> (g);
  return &omp_atomic_load_stmt->lhs;
}

/* Set the RHS of an atomic load.  */

static inline void
gimple_omp_atomic_load_set_rhs (gimple g, tree rhs)
{
  gimple_statement_omp_atomic_load *omp_atomic_load_stmt =
    as_a <gimple_statement_omp_atomic_load> (g);
  omp_atomic_load_stmt->rhs = rhs;
}

/* Get the RHS of an atomic load.  */

static inline tree
gimple_omp_atomic_load_rhs (const_gimple g)
{
  const gimple_statement_omp_atomic_load *omp_atomic_load_stmt =
    as_a <const gimple_statement_omp_atomic_load> (g);
  return omp_atomic_load_stmt->rhs;
}

/* Return a pointer to the RHS of an atomic load.  */

static inline tree *
gimple_omp_atomic_load_rhs_ptr (gimple g)
{
  gimple_statement_omp_atomic_load *omp_atomic_load_stmt =
    as_a <gimple_statement_omp_atomic_load> (g);
  return &omp_atomic_load_stmt->rhs;
}

/* Get the definition of the control variable in a GIMPLE_OMP_CONTINUE.  */

static inline tree
gimple_omp_continue_control_def (const_gimple g)
{
  const gimple_statement_omp_continue *omp_continue_stmt =
    as_a <const gimple_statement_omp_continue> (g);
  return omp_continue_stmt->control_def;
}

/* The same as above, but return the address.  */

static inline tree *
gimple_omp_continue_control_def_ptr (gimple g)
{
  gimple_statement_omp_continue *omp_continue_stmt =
    as_a <gimple_statement_omp_continue> (g);
  return &omp_continue_stmt->control_def;
}

/* Set the definition of the control variable in a GIMPLE_OMP_CONTINUE.
*/

static inline void
gimple_omp_continue_set_control_def (gimple g, tree def)
{
  gimple_statement_omp_continue *omp_continue_stmt =
    as_a <gimple_statement_omp_continue> (g);
  omp_continue_stmt->control_def = def;
}

/* Get the use of the control variable in a GIMPLE_OMP_CONTINUE.  */

static inline tree
gimple_omp_continue_control_use (const_gimple g)
{
  const gimple_statement_omp_continue *omp_continue_stmt =
    as_a <const gimple_statement_omp_continue> (g);
  return omp_continue_stmt->control_use;
}

/* The same as above, but return the address.  */

static inline tree *
gimple_omp_continue_control_use_ptr (gimple g)
{
  gimple_statement_omp_continue *omp_continue_stmt =
    as_a <gimple_statement_omp_continue> (g);
  return &omp_continue_stmt->control_use;
}

/* Set the use of the control variable in a GIMPLE_OMP_CONTINUE.  */

static inline void
gimple_omp_continue_set_control_use (gimple g, tree use)
{
  gimple_statement_omp_continue *omp_continue_stmt =
    as_a <gimple_statement_omp_continue> (g);
  omp_continue_stmt->control_use = use;
}

/* Return a pointer to the body for the GIMPLE_TRANSACTION statement GS.  */

static inline gimple_seq *
gimple_transaction_body_ptr (gimple gs)
{
  gimple_statement_transaction *transaction_stmt =
    as_a <gimple_statement_transaction> (gs);
  return &transaction_stmt->body;
}

/* Return the body for the GIMPLE_TRANSACTION statement GS.  */

static inline gimple_seq
gimple_transaction_body (gimple gs)
{
  return *gimple_transaction_body_ptr (gs);
}

/* Return the label associated with a GIMPLE_TRANSACTION.  */

static inline tree
gimple_transaction_label (const_gimple gs)
{
  const gimple_statement_transaction *transaction_stmt =
    as_a <const gimple_statement_transaction> (gs);
  return transaction_stmt->label;
}

/* Return a pointer to the label associated with a GIMPLE_TRANSACTION.  */

static inline tree *
gimple_transaction_label_ptr (gimple gs)
{
  gimple_statement_transaction *transaction_stmt =
    as_a <gimple_statement_transaction> (gs);
  return &transaction_stmt->label;
}

/* Return the subcode associated with a GIMPLE_TRANSACTION.
*/

static inline unsigned int
gimple_transaction_subcode (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
  return gs->subcode;
}

/* Set BODY to be the body for the GIMPLE_TRANSACTION statement GS.  */

static inline void
gimple_transaction_set_body (gimple gs, gimple_seq body)
{
  gimple_statement_transaction *transaction_stmt =
    as_a <gimple_statement_transaction> (gs);
  transaction_stmt->body = body;
}

/* Set the label associated with a GIMPLE_TRANSACTION.  */

static inline void
gimple_transaction_set_label (gimple gs, tree label)
{
  gimple_statement_transaction *transaction_stmt =
    as_a <gimple_statement_transaction> (gs);
  transaction_stmt->label = label;
}

/* Set the subcode associated with a GIMPLE_TRANSACTION.  */

static inline void
gimple_transaction_set_subcode (gimple gs, unsigned int subcode)
{
  GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
  gs->subcode = subcode;
}

/* Return a pointer to the return value for GIMPLE_RETURN GS.  */

static inline tree *
gimple_return_retval_ptr (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_RETURN);
  return gimple_op_ptr (gs, 0);
}

/* Return the return value for GIMPLE_RETURN GS.  */

static inline tree
gimple_return_retval (const_gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_RETURN);
  return gimple_op (gs, 0);
}

/* Set RETVAL to be the return value for GIMPLE_RETURN GS.  */

static inline void
gimple_return_set_retval (gimple gs, tree retval)
{
  GIMPLE_CHECK (gs, GIMPLE_RETURN);
  gimple_set_op (gs, 0, retval);
}

/* Returns true when the gimple statement STMT is any of the OpenMP types.
*/

/* Case labels for every OpenMP statement code; used by is_gimple_omp
   and by switches elsewhere that need to cover all OMP codes.  */
#define CASE_GIMPLE_OMP				\
    case GIMPLE_OMP_PARALLEL:			\
    case GIMPLE_OMP_TASK:			\
    case GIMPLE_OMP_FOR:			\
    case GIMPLE_OMP_SECTIONS:			\
    case GIMPLE_OMP_SECTIONS_SWITCH:		\
    case GIMPLE_OMP_SINGLE:			\
    case GIMPLE_OMP_TARGET:			\
    case GIMPLE_OMP_TEAMS:			\
    case GIMPLE_OMP_SECTION:			\
    case GIMPLE_OMP_MASTER:			\
    case GIMPLE_OMP_TASKGROUP:			\
    case GIMPLE_OMP_ORDERED:			\
    case GIMPLE_OMP_CRITICAL:			\
    case GIMPLE_OMP_RETURN:			\
    case GIMPLE_OMP_ATOMIC_LOAD:		\
    case GIMPLE_OMP_ATOMIC_STORE:		\
    case GIMPLE_OMP_CONTINUE

static inline bool
is_gimple_omp (const_gimple stmt)
{
  switch (gimple_code (stmt))
    {
    CASE_GIMPLE_OMP:
      return true;
    default:
      return false;
    }
}

/* Returns TRUE if statement G is a GIMPLE_NOP.  */

static inline bool
gimple_nop_p (const_gimple g)
{
  return gimple_code (g) == GIMPLE_NOP;
}

/* Return true if GS is a GIMPLE_RESX.  */

static inline bool
is_gimple_resx (const_gimple gs)
{
  return gimple_code (gs) == GIMPLE_RESX;
}

/* Return the predictor of GIMPLE_PREDICT statement GS.  The predictor
   shares the subcode word with the GF_PREDICT_TAKEN outcome bit.  */

static inline enum br_predictor
gimple_predict_predictor (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  return (enum br_predictor) (gs->subcode & ~GF_PREDICT_TAKEN);
}

/* Set the predictor of GIMPLE_PREDICT statement GS to PREDICT.  */

static inline void
gimple_predict_set_predictor (gimple gs, enum br_predictor predictor)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  gs->subcode = (gs->subcode & GF_PREDICT_TAKEN)
		| (unsigned) predictor;
}

/* Return the outcome of GIMPLE_PREDICT statement GS.  */

static inline enum prediction
gimple_predict_outcome (gimple gs)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  return (gs->subcode & GF_PREDICT_TAKEN) ? TAKEN : NOT_TAKEN;
}

/* Set the outcome of GIMPLE_PREDICT statement GS to OUTCOME.  */

static inline void
gimple_predict_set_outcome (gimple gs, enum prediction outcome)
{
  GIMPLE_CHECK (gs, GIMPLE_PREDICT);
  if (outcome == TAKEN)
    gs->subcode |= GF_PREDICT_TAKEN;
  else
    gs->subcode &= ~GF_PREDICT_TAKEN;
}

/* Return the type of the main expression computed by STMT.
   Return void_type_node if the statement computes nothing.  */

static inline tree
gimple_expr_type (const_gimple stmt)
{
  enum gimple_code code = gimple_code (stmt);
  if (code == GIMPLE_ASSIGN || code == GIMPLE_CALL)
    {
      tree type;
      /* In general we want to pass out a type that can be substituted
	 for both the RHS and the LHS types if there is a possibly
	 useless conversion involved.  That means returning the
	 original RHS type as far as we can reconstruct it.  */
      if (code == GIMPLE_CALL)
	{
	  /* An IFN_MASK_STORE internal call has no LHS; its "type" is
	     that of the value being stored (argument 3).  */
	  if (gimple_call_internal_p (stmt)
	      && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
	    type = TREE_TYPE (gimple_call_arg (stmt, 3));
	  else
	    type = gimple_call_return_type (stmt);
	}
      else
	switch (gimple_assign_rhs_code (stmt))
	  {
	  case POINTER_PLUS_EXPR:
	    type = TREE_TYPE (gimple_assign_rhs1 (stmt));
	    break;
	  default:
	    /* As fallback use the type of the LHS.  */
	    type = TREE_TYPE (gimple_get_lhs (stmt));
	    break;
	  }
      return type;
    }
  else if (code == GIMPLE_COND)
    return boolean_type_node;
  else
    return void_type_node;
}

/* Enum and arrays used for allocation stats.  Keep in sync with
   gimple.c:gimple_alloc_kind_names.  */
enum gimple_alloc_kind
{
  gimple_alloc_kind_assign,	/* Assignments.  */
  gimple_alloc_kind_phi,	/* PHI nodes.  */
  gimple_alloc_kind_cond,	/* Conditionals.  */
  gimple_alloc_kind_rest,	/* Everything else.  */
  gimple_alloc_kind_all
};

extern int gimple_alloc_counts[];
extern int gimple_alloc_sizes[];

/* Return the allocation kind for a given stmt CODE.  */

static inline enum gimple_alloc_kind
gimple_alloc_kind (enum gimple_code code)
{
  switch (code)
    {
      case GIMPLE_ASSIGN:
	return gimple_alloc_kind_assign;
      case GIMPLE_PHI:
	return gimple_alloc_kind_phi;
      case GIMPLE_COND:
	return gimple_alloc_kind_cond;
      default:
	return gimple_alloc_kind_rest;
    }
}

/* Return true if a location should not be emitted for this statement
   by annotate_all_with_location.
*/

static inline bool
gimple_do_not_emit_location_p (gimple g)
{
  return gimple_plf (g, GF_PLF_1);
}

/* Mark statement G so a location will not be emitted by
   annotate_one_with_location.  */

static inline void
gimple_set_do_not_emit_location (gimple g)
{
  /* The PLF flags are initialized to 0 when a new tuple is created,
     so no need to initialize it anywhere.  */
  gimple_set_plf (g, GF_PLF_1, true);
}

/* Macros for showing usage statistics.  SCALE reduces a byte count to
   bytes, kilobytes or megabytes; LABEL returns the matching unit
   character ('b', 'k' or 'M') for the same thresholds.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10	\
		  ? (x)					\
		  : ((x) < 1024*1024*10			\
		     ? (x) / 1024			\
		     : (x) / (1024*1024))))

#define LABEL(x) ((x) < 1024*10 ? 'b' : ((x) < 1024*1024*10 ? 'k' : 'M'))

#endif  /* GCC_GIMPLE_H */
teams.c
#pragma omp teams [clauses] structured-block